# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54

# devices
device 0 osd.0 class hdd
device 1 osd.1 class hdd
device 2 osd.2 class hdd
device 3 osd.3 class ssd
device 4 osd.4 class hdd
device 5 osd.5 class hdd
device 6 osd.6 class hdd
device 7 osd.7 class hdd
device 8 osd.8 class nvme
device 9 osd.9 class hdd
device 10 osd.10 class ssd
device 11 osd.11 class ssd
device 12 osd.12 class ssd
device 13 osd.13 class nvme
device 14 osd.14 class ssd
device 15 osd.15 class ssd
device 16 osd.16 class nvme
device 17 osd.17 class hdd

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root

# buckets
host pve23 {
    id -13  # do not change unnecessarily
    id -14 class hdd  # do not change unnecessarily
    id -15 class ssd  # do not change unnecessarily
    id -10 class nvme  # do not change unnecessarily
    # weight 19.738
    alg straw2
    hash 0  # rjenkins1
    item osd.2 weight 2.729
    item osd.3 weight 0.109
    item osd.14 weight 0.109
    item osd.0 weight 3.639
    item osd.4 weight 1.364
    item osd.9 weight 10.914
    item osd.13 weight 0.873
}
host pve22 {
    id -6  # do not change unnecessarily
    id -7 class hdd  # do not change unnecessarily
    id -8 class ssd  # do not change unnecessarily
    id -11 class nvme  # do not change unnecessarily
    # weight 18.373
    alg straw2
    hash 0  # rjenkins1
    item osd.10 weight 0.109
    item osd.11 weight 0.109
    item osd.5 weight 2.729
    item osd.17 weight 3.639
    item osd.1 weight 10.914
    item osd.16 weight 0.873
}
host pve21 {
    id -2  # do not change unnecessarily
    id -3 class hdd  # do not change unnecessarily
    id -5 class ssd  # do not change unnecessarily
    id -12 class nvme  # do not change unnecessarily
    # weight 14.735
    alg straw2
    hash 0  # rjenkins1
    item osd.12 weight 0.109
    item osd.15 weight 0.109
    item osd.6 weight 2.729
    item osd.7 weight 10.914
    item osd.8 weight 0.873
}
root default {
    id -1  # do not change unnecessarily
    id -4 class hdd  # do not change unnecessarily
    id -9 class ssd  # do not change unnecessarily
    id -16 class nvme  # do not change unnecessarily
    # weight 52.846
    alg straw2
    hash 0  # rjenkins1
    item pve23 weight 19.738
    item pve22 weight 18.373
    item pve21 weight 14.735
}

# rules
rule replicated_rule {
    id 0
    type replicated
    min_size 1
    max_size 10
    step take default class hdd
    step chooseleaf firstn 0 type host
    step emit
}
rule ssd_ruleset {
    id 1
    type replicated
    min_size 1
    max_size 10
    step take default class ssd
    step chooseleaf firstn 0 type host
    step emit
}
rule nvme_rule {
    id 2
    type replicated
    min_size 1
    max_size 10
    step take default class nvme
    step chooseleaf firstn 0 type host
    step emit
}

# end crush map
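
A decompiled map like the one above is normally put back into service by recompiling it with crushtool, injecting it with ceph osd setcrushmap, and then pointing pools at the class-specific rules by name. The lines below are only a sketch: the crushmap.txt/crushmap.bin file names are placeholders, and zfs_cache is the pool used in the benchmarks further down.

# recompile the edited text map and load it into the cluster
crushtool -c crushmap.txt -o crushmap.bin
ceph osd setcrushmap -i crushmap.bin

# point a pool at one of the class-specific rules, e.g. the SSD-only rule
ceph osd pool set zfs_cache crush_rule ssd_ruleset

Because the rules select by device class under the single default root (step take default class ...), the same host buckets serve hdd, ssd and nvme pools without needing separate hierarchies.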

[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
#bluestore_block_db_size = 13106127360
#bluestore_block_wal_size = 13106127360
cluster_network = 172.16.1.0/16
debug_asok = 0/0
debug_auth = 0/0
debug_buffer = 0/0
debug_client = 0/0
debug_context = 0/0
debug_crush = 0/0
debug_filer = 0/0
debug_filestore = 0/0
debug_finisher = 0/0
debug_heartbeatmap = 0/0
debug_journal = 0/0
debug_journaler = 0/0
debug_lockdep = 0/0
debug_mds = 0/0
debug_mds_balancer = 0/0
debug_mds_locker = 0/0
debug_mds_log = 0/0
debug_mds_log_expire = 0/0
debug_mds_migrator = 0/0
debug_mon = 0/0
debug_monc = 0/0
debug_ms = 0/0
debug_objclass = 0/0
debug_objectcacher = 0/0
debug_objecter = 0/0
debug_optracker = 0/0
debug_osd = 0/0
debug_paxos = 0/0
debug_perfcounter = 0/0
debug_rados = 0/0
debug_rbd = 0/0
debug_rgw = 0/0
debug_throttle = 0/0
debug_timer = 0/0
debug_tp = 0/0
fsid = e44fbe1c-b1c7-481d-bd25-dc595eae2d13
mon_allow_pool_delete = true
mon_host = 192.168.1.21 192.168.1.22 192.168.1.23
mon_max_pg_per_osd = 500
mon_osd_allow_primary_affinity = true
osd_journal_size = 28120
osd_max_backfills = 5
osd_max_pg_per_osd_hard_ratio = 3
osd_pool_default_min_size = 2
osd_pool_default_size = 3
osd_recovery_max_active = 6
osd_recovery_op_priority = 3
osd_scrub_auto_repair = true
osd_scrub_begin_hour = 1
osd_scrub_end_hour = 8
osd_scrub_sleep = 0.1
public_network = 192.168.1.0/24
rbd_cache = true
bluestore_cache_size_hdd = 2147483648 # 2G
# BlueStore has the ability to perform buffered writes. Buffered writes populate
# the read cache during the write process; this setting, in effect, turns the
# BlueStore cache into a write-through cache.
bluestore_default_buffered_write = true
# It is advised that spinning media continue to use 64 kB, while SSD/NVMe are
# likely to benefit from setting this to 4 kB.
bluestore_min_alloc_size_ssd = 4096
bluestore_min_alloc_size_hdd = 65536
# https://yourcmc.ru/wiki/Ceph_performance
bluefs_preextend_wal_files = true
cephx_require_signatures = true
cephx_cluster_require_signatures = true
cephx_sign_messages = true

[client]
client_reconnect_stale = true
keyring = /etc/pve/priv/$cluster.$name.keyring

[mds]
keyring = /var/lib/ceph/mds/ceph-$id/keyring
mds_data = /var/lib/ceph/mds/ceph-$id

[mon]
mon_compact_on_start = true
mon_compact_on_trim = true

[osd]
filestore_xattr_use_omap = true
keyring = /var/lib/ceph/osd/ceph-$id/keyring
osd_crush_update_on_start = true

[mds.pve23]
host = 192.168.1.23

[mds.pve21]
host = 192.168.1.21

[mds.pve22]
host = 192.168.1.22
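
Settings changed in ceph.conf are only picked up when a daemon restarts, so it can be worth confirming what an OSD is actually running with. A minimal sketch, assuming it is run on the node that hosts osd.0 (any locally running OSD id works, since the query goes over the daemon's admin socket):

# query the running values over the OSD's local admin socket
ceph daemon osd.0 config get osd_max_backfills
ceph daemon osd.0 config get bluestore_cache_size_hdd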

root@pve21:~# rados bench -p zfs_cache 30 write --no-cleanup
hints = 1
Maintaining 16 concurrent writes of 4194304 bytes to objects of size 4194304 for up to 30 seconds or 0 objects
Object prefix: benchmark_data_pve21_3994238
sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
0 0 0 0 0 0 - 0
1 16 51 35 139.992 140 0.209004 0.282963
2 16 87 71 141.989 144 0.339198 0.37351
3 16 124 108 143.99 148 0.854306 0.395849
4 16 157 141 140.989 132 0.877978 0.409175
5 16 193 177 141.589 144 0.0135585 0.421509
6 16 224 208 138.654 124 0.689556 0.437592
7 16 262 246 140.559 152 0.604785 0.435107
8 16 295 279 139.487 132 0.0949523 0.438708
9 16 332 316 140.431 148 0.842605 0.439193
10 16 366 350 139.986 136 0.0140917 0.442065
11 16 397 381 138.532 124 0.679741 0.449535
12 16 431 415 138.319 136 0.674606 0.451588
13 16 473 457 140.601 168 0.0133488 0.44542
14 16 511 495 141.414 152 0.292574 0.441756
15 16 545 529 141.052 136 0.923989 0.441713
16 16 593 577 144.235 192 0.012868 0.435535
17 16 631 615 144.691 152 0.0129975 0.43247
18 16 670 654 145.319 156 0.0145094 0.431384
19 16 705 689 145.038 140 0.818859 0.432818
2020-11-05 20:42:52.172037 min lat: 0.0126369 max lat: 0.999587 avg lat: 0.434105
sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
20 16 743 727 145.385 152 0.60138 0.434105
21 16 778 762 145.128 140 0.316159 0.433479
22 16 818 802 145.803 160 0.334659 0.431086
23 16 849 833 144.854 124 0.902827 0.434864
24 16 881 865 144.152 128 0.362459 0.437654
25 16 912 896 143.345 124 0.776994 0.440992
26 16 955 939 144.447 172 0.289116 0.437467
27 16 994 978 144.874 156 0.774429 0.436769
28 16 1035 1019 145.556 164 0.528945 0.435309
29 16 1073 1057 145.778 152 0.796811 0.433397
30 16 1114 1098 146.385 164 0.733767 0.433262
Total time run: 30.6479
Total writes made: 1115
Write size: 4194304
Object size: 4194304
Bandwidth (MB/sec): 145.524
Stddev Bandwidth: 16.1578
Max bandwidth (MB/sec): 192
Min bandwidth (MB/sec): 124
Average IOPS: 36
Stddev IOPS: 4.03946
Max IOPS: 48
Min IOPS: 31
Average Latency(s): 0.43931
Stddev Latency(s): 0.316067
Max latency(s): 1.15206
Min latency(s): 0.0124192
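
As a quick sanity check, the summary figures follow directly from the run parameters: 1115 writes x 4 MiB / 30.6479 s ~ 145.5 MB/s and 1115 / 30.6479 ~ 36 IOPS, matching the reported bandwidth and average IOPS; with 16 writes in flight, the expected average latency is roughly 16 / 36.4 ~ 0.44 s, matching the reported 0.43931 s.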

root@pve21:~# rados bench -p zfs_cache 30 seq
hints = 1
sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
0 0 0 0 0 0 - 0
1 16 51 35 139.976 140 0.152329 0.261514
2 16 89 73 145.98 152 0.967852 0.346284
3 16 125 109 145.316 144 0.929344 0.383067
4 16 160 144 143.983 140 0.142341 0.395781
5 16 194 178 142.384 136 0.826413 0.408423
6 16 226 210 139.984 128 0.142744 0.424383
7 16 265 249 142.27 156 0.00575118 0.426178
8 16 298 282 140.985 132 0.720539 0.435783
9 16 334 318 141.318 144 0.872076 0.433645
10 16 368 352 140.785 136 0.872145 0.435896
11 16 399 383 139.258 124 0.142806 0.439325
12 16 434 418 139.319 140 0.781405 0.445867
13 16 477 461 141.832 172 0.889309 0.437065
14 16 513 497 141.986 144 0.9639 0.436264
15 16 548 532 141.853 140 0.229899 0.437694
16 16 597 581 145.235 196 0.217501 0.430265
17 16 638 622 146.338 164 0.00815341 0.424903
18 16 674 658 146.207 144 0.787708 0.427804
19 16 711 695 146.3 148 0.00604705 0.426704
2020-11-05 20:49:50.333587 min lat: 0.00335944 max lat: 1.09719 avg lat: 0.426499
sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
20 16 751 735 146.985 160 0.139573 0.426499
21 16 787 771 146.841 144 0.00424377 0.428744
22 16 824 808 146.893 148 0.00610794 0.428274
23 16 854 838 145.724 120 0.0777444 0.430503
24 16 887 871 145.151 132 0.0738695 0.432835
25 16 917 901 144.145 120 0.0736379 0.436277
26 16 962 946 145.523 180 0.142901 0.43178
27 16 1006 990 146.651 176 0.141083 0.428757
28 16 1046 1030 147.127 160 0.727332 0.428981
29 16 1083 1067 147.157 148 0.0791994 0.427605
30 15 1115 1100 146.652 132 0.226387 0.427832
Total time run: 30.3715
Total reads made: 1115
Read size: 4194304
Object size: 4194304
Bandwidth (MB/sec): 146.848
Average IOPS: 36
Stddev IOPS: 4.45153
Max IOPS: 49
Min IOPS: 30
Average Latency(s): 0.434557
Max latency(s): 1.49625
Min latency(s): 0.00334497
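
Because the write run used --no-cleanup, the benchmark_data_pve21_* objects are still in the pool after the sequential-read pass. They can be reused for a random-read pass and then removed once testing is done; a minimal sketch against the same pool:

# random-read pass over the objects written above
rados bench -p zfs_cache 30 rand
# remove the benchmark objects when finished
rados -p zfs_cache cleanup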