Guest User

Untitled

a guest
Jan 22nd, 2019
97
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 6.13 KB | None | 0 0
#
# This is an example YAML profile for cassandra-stress
#
# The general form of the command line is as follows:
#
#   cassandra-stress user profile=<profile.yaml> ops([insert|<read-op>]=<op-ratio>, ...) n=<partition-ops>
#
# cassandra-stress will then run multiple parallel consumers (controlled by the
# -rate threads=<consumers> option):
#
#   * Each consumer draws an operation at random from the list of ops;
#   * The distribution of the ops is controlled by the <op-ratio> parameter;
#   * Each op will then generate cassandra queries. When limited by the
#     <partition-ops> value:
#     * Each <read-op> will generate a single SELECT query, decrementing
#       <partition-ops> by 1;
#     * Each insert will generate multiple UPDATE queries, decrementing
#       <partition-ops> by the number of unique partitions inserted;
#     * When <partition-ops> is exhausted, cassandra-stress stops.

# insert data:
#   cassandra-stress user profile=/home/jake/stress1.yaml ops(insert=1)
#
# read, using query simple1:
#   cassandra-stress user profile=/home/jake/stress1.yaml ops(simple1=1)
#
# mixed workload (90/10):
#   cassandra-stress user profile=/home/jake/stress1.yaml ops(insert=1,simple1=9)
  30. #
  31. # Keyspace info
  32. #
  33. keyspace: stresscql
  34.  
  35. #
  36. # The CQL for creating a keyspace (optional if it already exists)
  37. #
  38. keyspace_definition: |
  39. CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
  40.  
  41. #
  42. # Table info
  43. #
  44. table: typestest
  45.  
  46. #
  47. # The CQL for creating a table you wish to stress (optional if it already exists)
  48. #
  49. table_definition: |
  50. CREATE TABLE typestest (
  51. name text,
  52. choice boolean,
  53. date timestamp,
  54. address inet,
  55. dbl double,
  56. lval bigint,
  57. ival int,
  58. uid timeuuid,
  59. value blob,
  60. PRIMARY KEY((name,choice), date, address, dbl, lval, ival, uid)
  61. )
  62. WITH compaction = { 'class':'LeveledCompactionStrategy' }
  63. # AND compression = { 'sstable_compression' : '' }
  64. # AND comment='A table of many types to test wide rows'
  65.  
  66. #
  67. # Optional meta information on the generated columns in the above table
  68. # The min and max only apply to text and blob types
  69. # The distribution field represents the total unique population
  70. # distribution of that column across rows. Supported types are
  71. #
  72. # EXP(min..max) An exponential distribution over the range [min..max]
  73. # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max]
  74. # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng
  75. # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev
  76. # UNIFORM(min..max) A uniform distribution over the range [min, max]
  77. # FIXED(val) A fixed distribution, always returning the same value
  78. # SEQ(min..max) A fixed sequence, returning values in the range min to max sequentially (starting based on seed), wrapping if necessary.
  79. # Aliases: extr, gauss, normal, norm, weibull
  80. #
  81. # If preceded by ~, the distribution is inverted
  82. #
  83. # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
  84. #
  85. columnspec:
  86. - name: name
  87. size: uniform(1..10)
  88. population: uniform(1..1B) # the range of unique values to select for the field (default is 100Billion)
  89. - name: date
  90. cluster: uniform(20..40)
  91. - name: lval
  92. population: gaussian(1..1000)
  93. cluster: uniform(1..4)
  94.  
  95.  
  96. # The insert operation
  97. insert:
  98. partitions: uniform(1..50) # Number of unique partitions to update in a single insert op.
  99. # Defaults to fixed(1)
  100. partitions-per-batch: MULTIPLE # SINGLE or MULTIPLE partitions per-batch; multiple partitions in a
  101. # single batch is a pessimization, but it's allowed. Defaults to SINGLE.
  102. max-rows-per-batch: uniform(1..100)
  103. # Maximum size of a batch. Rows are inserted in batches of up to
  104. # max-rows-per-batch, and after each batch is sent max-rows-per-batch
  105. # is regenerated from this distribution. If the generated value is 0,
  106. # then each partition (for SINGLE partitions-per-batch) or all partitions
  107. # (for MULTIPLE partitions-per-batch) is inserted in a single batch (so if you
  108. # always require this behaviour, use fixed(0)). Defaults to fixed(100).
  109. batchtype: LOGGED # Type of batch to use: LOGGED, UNLOGGED or COUNTER
  110. select: uniform(1..10)/10 # Proportion of rows that will be generated in each partition. The number of rows
  111. # per-partition will be determined by <select-value> * <partition-size>, where
  112. # <partition-size> is the number of possible rows in a partition, as determined by
  113. # the columns with cluster keys in the columnspec above. This will be
  114. # generated for each partition in a single insert op.
  115. # Defaults to fixed(1)/1
  116. row-population: fixed(1)/1 # Proportion of populated columns in a row.
  117. # Defaults to fixed(1)/1
  118. #
  119. # A list of queries you wish to run against the schema
  120. #
  121. queries:
  122. simple1:
  123. cql: select * from typestest where name = ? and choice = ? LIMIT 100
  124. fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
  125. range1:
  126. cql: select * from typestest where name = ? and choice = ? and date >= ? LIMIT 100
  127. fields: multirow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition)
  128.  
  129.  
  130. #
  131. # A list of bulk read queries that analytics tools may perform against the schema
  132. # Each query will sweep an entire token range, page by page.
  133. #
  134. token_range_queries:
  135. all_columns_tr_query:
  136. columns: '*'
  137. page_size: 5000
  138.  
  139. value_tr_query:
  140. columns: value
Add Comment
Please, Sign In to add comment