Ceph
HW:
3x HP Z400, 24GB RAM each, CPUs: Xeon W3680 / Xeon W3520 / Xeon W3520
(each '*osdN->sdXn' entry below maps an OSD to its block.db partition on that host's journal SSD; a creation sketch follows the three host listings)

pve11 (.11)
--------------------------------------------------------------
SATA1 'sda' - OS - 300GB
SATA2 'sdb' *osd7->sdf1 - WD RED 0JJ7 - 6TB
SATA3 'sdc' *osd8->sdf2 - Hitachi GLGL - 500GB
SATA4 'sdd' *osd12->sdf3 - Seagate 5428 - 600GB
SATA5 -
SATA6 -
SATACARD1 'sde' *osd13->sdf4 - Intel (264MB/s) - 160GB SSD Cache pool
SATACARD2 'sdf' - Kingston V300 F6E5 (470MB/s) - 120GB SSD Journals
SATACARD3 'sdg' *osd14 - Intel 03GN (419MB/s) - 240GB SSD Cache pool
SATACARD4 -
--------------------------------------------------------------
~7 TB HDD + 640 GB SSD
RAM: 4GBx1333 / 4GBx1333 / 4GBx1333 / 4GBx1333 / 2GBx1333 / 2GBx1333

pve (.12) (1 IDE free)
--------------------------------------------------------------
SATA1 'sda' - OS - 300GB
SATA2 'sdb' *osd2->sdg13 - WD BLUE R36KV - 4TB
SATA3 'sdc' *osd4->sdg8 - Hitachi PHZ8L - 500GB
SATA4 -
SATA5 'sde' *osd10->sdg11 - Toshiba 0BZS - 640GB
SATA6 'sdf' *osd5->sdg12 - Samsung 0311 - 500GB
SATACARD1 'sdh' - Kingston V300 F76F (372MB/s) - 120GB SSD Journals
SATACARD2 'sdg' *osd16->sdg10 - Intel SSD 1207GN (415MB/s) - 120GB SSD Cache pool
SATACARD3 'sdf' *osd11->sdg9 - Micron SSD 1240BB (342MB/s) - 120GB SSD Cache pool
SATACARD4 -
--------------------------------------------------------------
5 TB + 360 GB SSD
RAM: 4GBx1333 / 4GBx1333 / 4GBx1333 / 4GBx1333 / <free>

pve3 (.13) (1 SATA power from IDE-SATA splitter)
--------------------------------------------------------------
SATA1 'sda' - OS - 300GB
SATA2 'sdb' *osd3->sdf19 - WD BLUE LLKYHZ - 4TB
SATA3 'sdc' *osd0->sdf15 - ST9500420AS - 500GB
SATA4 'sdd' *osd1->sdf16 - Hitachi DE1XMX - 500GB
SATA5 'sde' *osd6->sdf17 - Samsung ST500 - 500GB
SATA6 -
SATACARD1 'sdh' *osd9->sdf18 - Samsung EVO 325T (458MB/s) - 120GB SSD Cache pool
SATACARD2 'sdg' - Kingston V300 0318DA (429MB/s) - 120GB SSD Journals
SATACARD3 'sdf' *osd15->sdg14 - Intel (251MB/s) - 160GB SSD Cache pool
SATACARD4 -
--------------------------------------------------------------
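
Not shown in the paste: how each OSD was created with its block.db on the shared journal SSD. A minimal sketch, assuming the stock ceph-disk workflow of this Luminous / PVE 5.2 era (device names are examples taken from the pve11 listing, not commands copied from the cluster):

# example only: bluestore OSD on the 6TB WD RED ('sdb'), RocksDB placed on the Kingston journal SSD ('sdf')
ceph-disk prepare --bluestore --block.db /dev/sdf /dev/sdb
ceph-disk activate /dev/sdb1
# ceph-disk carves the DB partition on /dev/sdf using bluestore_block_db_size from ceph.conf (see below)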


ceph.conf
[global]
auth client required = cephx
auth cluster required = cephx
auth service required = cephx
cluster network = 172.16.1.0/24
mon_max_pg_per_osd = 500
osd_max_pg_per_osd_hard_ratio = 3
osd_max_backfills = 5
osd_recovery_max_active = 5
bluestore_block_db_size = 16106127360
bluestore_block_wal_size = 16106127360
rbd_cache = true
osd scrub begin hour = 1
osd scrub end hour = 8
osd scrub sleep = 0.1

debug asok = 0/0
debug auth = 0/0
debug buffer = 0/0
debug client = 0/0
debug context = 0/0
debug crush = 0/0
debug filer = 0/0
debug filestore = 0/0
debug finisher = 0/0
debug heartbeatmap = 0/0
debug journal = 0/0
debug journaler = 0/0
debug lockdep = 0/0
debug mds = 0/0
debug mds balancer = 0/0
debug mds locker = 0/0
debug mds log = 0/0
debug mds log expire = 0/0
debug mds migrator = 0/0
debug mon = 0/0
debug monc = 0/0
debug ms = 0/0
debug objclass = 0/0
debug objectcacher = 0/0
debug objecter = 0/0
debug optracker = 0/0
debug osd = 0/0
debug paxos = 0/0
debug perfcounter = 0/0
debug rados = 0/0
debug rbd = 0/0
debug rgw = 0/0
debug throttle = 0/0
debug timer = 0/0
debug tp = 0/0
fsid = e44fbe1c-b1c7-481d-bd25-dc595eae2d13
keyring = /etc/pve/priv/$cluster.$name.keyring
mon allow pool delete = true
mon osd allow primary affinity = true
osd journal size = 28120
osd pool default min size = 2
osd pool default size = 3
public network = 192.168.1.0/24

[osd]
filestore xattr use omap = true
keyring = /var/lib/ceph/osd/ceph-$id/keyring
osd crush update on start = false

[mon.pve]
host = pve
mon addr = 192.168.1.12:6789

[mon.pve11]
host = pve11
mon addr = 192.168.1.11:6789

[mon.pve3]
host = pve3
mon addr = 192.168.1.13:6789

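The recovery and scrub tunables above can be checked and adjusted at runtime without restarting OSDs. A short sketch using the standard admin-socket and tell interfaces (values are the ones from this ceph.conf; not commands shown in the paste):

# read the effective value on a running OSD (run on the node that hosts osd.0)
ceph daemon osd.0 config get osd_max_backfills
# push values to all OSDs at runtime (not persisted; ceph.conf applies again after a restart)
ceph tell osd.* injectargs '--osd_max_backfills 5 --osd_recovery_max_active 5'
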
root@pve:~# ceph -v
ceph version 12.2.7 (94ce186ac93bb28c3c444bccfefb8a31eb0748e4) luminous (stable)
root@pve:~# pveversion
pve-manager/5.2-8/fdf39912 (running kernel: 4.15.18-3-pve)

root@pve:~# ceph df
GLOBAL:
    SIZE       AVAIL      RAW USED     %RAW USED
    17890G     8604G         9285G         51.90
POOLS:
    NAME                ID     USED       %USED     MAX AVAIL     OBJECTS
    ceph_pool           11      3357G     63.49         1931G      895029
    cache_pool          16     54066M     20.97          198G       17051
    cephfs_data         21          0         0         1931G           0
    cephfs_metadata     22          0         0         1931G           0
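
The paste never shows how cache_pool relates to ceph_pool. If it is a classic cache tier in front of ceph_pool (the pool name and the "SSD Cache pool" OSD labels suggest that, but it could just as well be a plain SSD-backed pool), the wiring would have looked roughly like the standard Luminous cache-tiering sequence below; the target_max_bytes value is purely illustrative:

# assumption: cache_pool acts as a writeback cache tier for ceph_pool
ceph osd tier add ceph_pool cache_pool
ceph osd tier cache-mode cache_pool writeback
ceph osd tier set-overlay ceph_pool cache_pool
ceph osd pool set cache_pool hit_set_type bloom
ceph osd pool set cache_pool target_max_bytes 200000000000   # illustrative ~200 GB cap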

root@pve:~# ceph osd tree
ID  CLASS WEIGHT   TYPE NAME      STATUS REWEIGHT PRI-AFF
-1        17.47096 root default
-3         5.35599     host pve
 2   hdd   3.63899         osd.2      up  1.00000       0
 4   hdd   0.45499         osd.4      up  1.00000 1.00000
 5   hdd   0.45499         osd.5      up  1.00000 1.00000
10   hdd   0.58199         osd.10     up  1.00000       0
11   ssd   0.11600         osd.11     up  1.00000 1.00000
16   ssd   0.10899         osd.16     up  1.00000 1.00000
-10        6.85799     host pve11
 7   hdd   5.45799         osd.7      up  1.00000       0
 8   hdd   0.45499         osd.8      up  1.00000 1.00000
12   hdd   0.58199         osd.12     up  1.00000 0.79999
13   ssd   0.14499         osd.13     up  1.00000 1.00000
14   ssd   0.21799         osd.14     up  1.00000 1.00000
-7         5.25699     host pve3
 0   hdd   0.45499         osd.0      up  1.00000 1.00000
 1   hdd   0.45499         osd.1      up  1.00000 1.00000
 3   hdd   3.63899         osd.3      up  1.00000       0
 6   hdd   0.45499         osd.6      up  1.00000 1.00000
 9   ssd   0.10899         osd.9      up  1.00000 1.00000
15   ssd   0.14499         osd.15     up  1.00000 1.00000
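
Two things are visible in the tree: every OSD carries an hdd/ssd device class, and the big or slow spinners (osd.2, osd.3, osd.7, osd.10, osd.12) have PRI-AFF below 1, so they are rarely or never chosen as PG primaries (enabled by "mon osd allow primary affinity = true" above). A sketch of the standard Luminous commands behind both; the rule name is an example and the actual pool-to-rule mapping is not shown in the paste:

# replicated CRUSH rule restricted to the ssd device class
ceph osd crush rule create-replicated ssd_rule default host ssd
ceph osd pool set cache_pool crush_rule ssd_rule     # example: keep the cache pool on SSD OSDs only
# keep the large HDDs from serving as primaries (0.79999 above corresponds to setting 0.8)
ceph osd primary-affinity osd.2 0
ceph osd primary-affinity osd.12 0.8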

root@pve:~/proxmox_custom_conf# ceph-disk list
/dev/sda :
 /dev/sda1 other, 21686148-6449-6e6f-744e-656564454649
 /dev/sda2 other, vfat
 /dev/sda3 other, LVM2_member
/dev/sdb :
 /dev/sdb1 ceph data, active, cluster ceph, osd.2, block /dev/sdb2, block.db /dev/sdh13
 /dev/sdb2 ceph block, for /dev/sdb1
/dev/sdc :
 /dev/sdc1 ceph data, active, cluster ceph, osd.4, block /dev/sdc2, block.db /dev/sdh8
 /dev/sdc2 ceph block, for /dev/sdc1
/dev/sdd :
 /dev/sdd1 ceph data, active, cluster ceph, osd.10, block /dev/sdd2, block.db /dev/sdh11
 /dev/sdd2 ceph block, for /dev/sdd1
/dev/sde :
 /dev/sde1 ceph data, active, cluster ceph, osd.5, block /dev/sde2, block.db /dev/sdh12
 /dev/sde2 ceph block, for /dev/sde1
/dev/sdf :
 /dev/sdf1 ceph data, active, cluster ceph, osd.11, block /dev/sdf2, block.db /dev/sdh9
 /dev/sdf2 ceph block, for /dev/sdf1
/dev/sdg :
 /dev/sdg1 ceph data, active, cluster ceph, osd.16, block /dev/sdg2, block.db /dev/sdh10
 /dev/sdg2 ceph block, for /dev/sdg1
/dev/sdh :
 /dev/sdh10 ceph block.db, for /dev/sdg1
 /dev/sdh11 ceph block.db, for /dev/sdd1
 /dev/sdh12 ceph block.db, for /dev/sde1
 /dev/sdh13 ceph block.db, for /dev/sdb1
 /dev/sdh8 ceph block.db, for /dev/sdc1
 /dev/sdh9 ceph block.db, for /dev/sdf1
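
The same data-to-DB mapping can be cross-checked from the cluster side with ceph osd metadata, which reports the bluestore devices per OSD (the field names in the grep are from Luminous and may differ slightly between releases):

# example: show where osd.2 keeps its RocksDB (expect /dev/sdh13, matching the ceph-disk output above)
ceph osd metadata 2 | grep -E 'bluefs_db|bluestore_bdev'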

Model: ATA KINGSTON SV300S3 (scsi)
Disk /dev/sdh: 120GB
Sector size (logical/physical): 512B/4096B
Partition Table: gpt
Disk Flags:

Number  Start   End     Size    File system  Name           Flags
 8      7517MB  23.6GB  16.1GB               ceph block.db
 9      23.6GB  39.7GB  16.1GB               ceph block.db
10      39.7GB  55.8GB  16.1GB               ceph block.db
11      55.8GB  71.9GB  16.1GB               ceph block.db
12      71.9GB  88.0GB  16.1GB               ceph block.db
13      88.0GB  104GB   16.1GB               ceph block.db
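
Those six 16.1GB partitions line up with bluestore_block_db_size = 16106127360 from the ceph.conf above; a quick sanity check with plain bash arithmetic (nothing cluster-specific):

echo $((16106127360 / 1024 / 1024 / 1024))        # 15 -> 15 GiB per DB partition
echo $((16106127360 / 1000 / 1000 / 1000))        # 16 -> ~16.1 GB decimal, as parted prints it
echo $((6 * 16106127360 / 1000 / 1000 / 1000))    # 96 -> ~96.6 GB of the 120GB Kingston used for DBs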