HW: 3x HP Z400 / 24 GB RAM / Xeon W3680, Xeon W3520, Xeon W3520
(disk layout per node below; an OSD-creation sketch follows the inventory)

pve11 (192.168.1.11)
--------------------------------------------------------------
SATA1     'sda'                - OS - 300 GB
SATA2     'sdb' *osd7 ->sdf1   - WD RED 0JJ7 - 6 TB
SATA3     'sdc' *osd8 ->sdf2   - Hitachi GLGL - 500 GB
SATA4     'sdd' *osd12->sdf3   - Seagate 5428 - 600 GB
SATA5     -
SATA6     -
SATACARD1 'sde' *osd13->sdf4   - Intel (264 MB/s) - 160 GB SSD Cache pool
SATACARD2 'sdf'                - Kingston V300 F6E5 (470 MB/s) - 120 GB SSD Journals
SATACARD3 'sdg' *osd14         - Intel 03GN (419 MB/s) - 240 GB SSD Cache pool
SATACARD4 -
--------------------------------------------------------------
7.x TB + 640 GB SSD
RAM: 4 GB @1333 / 4 GB @1333 / 4 GB @1333 / 4 GB @1333 / 2 GB @1333 / 2 GB @1333
pve (192.168.1.12) (1 IDE free)
--------------------------------------------------------------
SATA1     'sda'                 - OS - 300 GB
SATA2     'sdb' *osd2 ->sdg13   - WD BLUE R36KV - 4 TB
SATA3     'sdc' *osd4 ->sdg8    - Hitachi PHZ8L - 500 GB
SATA4     -
SATA5     'sde' *osd10->sdg11   - Toshiba 0BZS - 640 GB
SATA6     'sdf' *osd5 ->sdg12   - Samsung 0311 - 500 GB
SATACARD1 'sdh'                 - Kingston V300 F76F (372 MB/s) - 120 GB SSD Journals
SATACARD2 'sdg' *osd16->sdg10   - Intel SSD 1207GN (415 MB/s) - 120 GB SSD Cache pool
SATACARD3 'sdf' *osd11->sdg9    - Micron SSD 1240BB (342 MB/s) - 120 GB SSD Cache pool
SATACARD4 -
--------------------------------------------------------------
5 TB + 360 GB SSD
RAM: 4 GB @1333 / 4 GB @1333 / 4 GB @1333 / 4 GB @1333 / <free>
pve3 (192.168.1.13) (1 SATA power from an IDE-SATA splitter)
--------------------------------------------------------------
SATA1     'sda'                 - OS - 300 GB
SATA2     'sdb' *osd3 ->sdf19   - WD BLUE LLKYHZ - 4 TB
SATA3     'sdc' *osd0 ->sdf15   - Seagate ST9500420AS - 500 GB
SATA4     'sdd' *osd1 ->sdf16   - Hitachi DE1XMX - 500 GB
SATA5     'sde' *osd6 ->sdf17   - Samsung ST500 - 500 GB
SATA6     -
SATACARD1 'sdh' *osd9 ->sdf18   - Samsung EVO 325T (458 MB/s) - 120 GB SSD Cache pool
SATACARD2 'sdg'                 - Kingston V300 0318DA (429 MB/s) - 120 GB SSD Journals
SATACARD3 'sdf' *osd15->sdg14   - Intel (251 MB/s) - 160 GB SSD Cache pool
SATACARD4 -
--------------------------------------------------------------
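For reference, a minimal sketch of how an OSD with its DB partition on the shared journal SSD might be (re)created on pve11. This is an assumption based on the layout above and the ceph-disk output further down, not a record of the actual commands; device names come from the inventory, and the OSD id and CRUSH weight are purely illustrative.

# Sketch only (ceph-disk era, Luminous). /dev/sdb = data disk, /dev/sdf = shared DB/journal SSD.
# The size of the DB partition carved from /dev/sdf is governed by bluestore_block_db_size in ceph.conf below.
ceph-disk prepare --bluestore --block.db /dev/sdf /dev/sdb
ceph-disk activate /dev/sdb1
# Because ceph.conf sets "osd crush update on start = false", the new OSD has to be
# placed into the CRUSH map by hand (id and weight here are hypothetical):
ceph osd crush add osd.17 0.455 host=pve11
ceph osd tree        # verify placement and device class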
ceph.conf:

[global]
auth client required = cephx
auth cluster required = cephx
auth service required = cephx
cluster network = 172.16.1.0/24
mon_max_pg_per_osd = 500
osd_max_pg_per_osd_hard_ratio = 3
osd_max_backfills = 5
osd_recovery_max_active = 5
bluestore_block_db_size = 16106127360
bluestore_block_wal_size = 16106127360
rbd_cache = true
osd scrub begin hour = 1
osd scrub end hour = 8
osd scrub sleep = 0.1
debug asok = 0/0
debug auth = 0/0
debug buffer = 0/0
debug client = 0/0
debug context = 0/0
debug crush = 0/0
debug filer = 0/0
debug filestore = 0/0
debug finisher = 0/0
debug heartbeatmap = 0/0
debug journal = 0/0
debug journaler = 0/0
debug lockdep = 0/0
debug mds = 0/0
debug mds balancer = 0/0
debug mds locker = 0/0
debug mds log = 0/0
debug mds log expire = 0/0
debug mds migrator = 0/0
debug mon = 0/0
debug monc = 0/0
debug ms = 0/0
debug objclass = 0/0
debug objectcacher = 0/0
debug objecter = 0/0
debug optracker = 0/0
debug osd = 0/0
debug paxos = 0/0
debug perfcounter = 0/0
debug rados = 0/0
debug rbd = 0/0
debug rgw = 0/0
debug throttle = 0/0
debug timer = 0/0
debug tp = 0/0
fsid = e44fbe1c-b1c7-481d-bd25-dc595eae2d13
keyring = /etc/pve/priv/$cluster.$name.keyring
mon allow pool delete = true
mon osd allow primary affinity = true
osd journal size = 28120
osd pool default min size = 2
osd pool default size = 3
public network = 192.168.1.0/24

[osd]
filestore xattr use omap = true
keyring = /var/lib/ceph/osd/ceph-$id/keyring
osd crush update on start = false

[mon.pve]
host = pve
mon addr = 192.168.1.12:6789

[mon.pve11]
host = pve11
mon addr = 192.168.1.11:6789

[mon.pve3]
host = pve3
mon addr = 192.168.1.13:6789
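The backfill/recovery throttles above (osd_max_backfills = 5, osd_recovery_max_active = 5) are read by each OSD from this file. A sketch, assuming you want to check or temporarily override the running values without restarting anything (injectargs changes are runtime-only and revert on restart; the values shown are just examples):

# Check what one local OSD is actually running with (run on the host that owns osd.2):
ceph daemon osd.2 config get osd_max_backfills
ceph daemon osd.2 config get osd_recovery_max_active
# Temporarily throttle recovery across all OSDs, e.g. during production hours:
ceph tell osd.* injectargs '--osd_max_backfills 1 --osd_recovery_max_active 1'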
root@pve:~# ceph -v
ceph version 12.2.7 (94ce186ac93bb28c3c444bccfefb8a31eb0748e4) luminous (stable)

root@pve:~# pveversion
pve-manager/5.2-8/fdf39912 (running kernel: 4.15.18-3-pve)

root@pve:~# ceph df
GLOBAL:
    SIZE       AVAIL     RAW USED     %RAW USED
    17890G     8604G     9285G        51.90
POOLS:
    NAME                ID     USED       %USED     MAX AVAIL     OBJECTS
    ceph_pool           11     3357G      63.49     1931G         895029
    cache_pool          16     54066M     20.97     198G          17051
    cephfs_data         21     0          0         1931G         0
    cephfs_metadata     22     0          0         1931G         0
root@pve:~# ceph osd tree
ID  CLASS WEIGHT   TYPE NAME       STATUS REWEIGHT PRI-AFF
 -1       17.47096 root default
 -3        5.35599     host pve
  2   hdd  3.63899         osd.2       up  1.00000       0
  4   hdd  0.45499         osd.4       up  1.00000 1.00000
  5   hdd  0.45499         osd.5       up  1.00000 1.00000
 10   hdd  0.58199         osd.10      up  1.00000       0
 11   ssd  0.11600         osd.11      up  1.00000 1.00000
 16   ssd  0.10899         osd.16      up  1.00000 1.00000
-10        6.85799     host pve11
  7   hdd  5.45799         osd.7       up  1.00000       0
  8   hdd  0.45499         osd.8       up  1.00000 1.00000
 12   hdd  0.58199         osd.12      up  1.00000 0.79999
 13   ssd  0.14499         osd.13      up  1.00000 1.00000
 14   ssd  0.21799         osd.14      up  1.00000 1.00000
 -7        5.25699     host pve3
  0   hdd  0.45499         osd.0       up  1.00000 1.00000
  1   hdd  0.45499         osd.1       up  1.00000 1.00000
  3   hdd  3.63899         osd.3       up  1.00000       0
  6   hdd  0.45499         osd.6       up  1.00000 1.00000
  9   ssd  0.10899         osd.9       up  1.00000 1.00000
 15   ssd  0.14499         osd.15      up  1.00000 1.00000
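The PRI-AFF column ties back to "mon osd allow primary affinity = true" in ceph.conf: the large/slow HDDs (osd.2, osd.7, osd.10, osd.3) have primary affinity 0 and osd.12 has ~0.8, so they hold replicas but are rarely picked as the primary for client reads. Presumably these were set with commands along these lines (a sketch, not the recorded history):

ceph osd primary-affinity osd.2 0
ceph osd primary-affinity osd.12 0.8
ceph osd tree        # the PRI-AFF column shows the result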
root@pve:~/proxmox_custom_conf# ceph-disk list
/dev/sda :
 /dev/sda1 other, 21686148-6449-6e6f-744e-656564454649
 /dev/sda2 other, vfat
 /dev/sda3 other, LVM2_member
/dev/sdb :
 /dev/sdb1 ceph data, active, cluster ceph, osd.2, block /dev/sdb2, block.db /dev/sdh13
 /dev/sdb2 ceph block, for /dev/sdb1
/dev/sdc :
 /dev/sdc1 ceph data, active, cluster ceph, osd.4, block /dev/sdc2, block.db /dev/sdh8
 /dev/sdc2 ceph block, for /dev/sdc1
/dev/sdd :
 /dev/sdd1 ceph data, active, cluster ceph, osd.10, block /dev/sdd2, block.db /dev/sdh11
 /dev/sdd2 ceph block, for /dev/sdd1
/dev/sde :
 /dev/sde1 ceph data, active, cluster ceph, osd.5, block /dev/sde2, block.db /dev/sdh12
 /dev/sde2 ceph block, for /dev/sde1
/dev/sdf :
 /dev/sdf1 ceph data, active, cluster ceph, osd.11, block /dev/sdf2, block.db /dev/sdh9
 /dev/sdf2 ceph block, for /dev/sdf1
/dev/sdg :
 /dev/sdg1 ceph data, active, cluster ceph, osd.16, block /dev/sdg2, block.db /dev/sdh10
 /dev/sdg2 ceph block, for /dev/sdg1
/dev/sdh :
 /dev/sdh10 ceph block.db, for /dev/sdg1
 /dev/sdh11 ceph block.db, for /dev/sdd1
 /dev/sdh12 ceph block.db, for /dev/sde1
 /dev/sdh13 ceph block.db, for /dev/sdb1
 /dev/sdh8 ceph block.db, for /dev/sdc1
 /dev/sdh9 ceph block.db, for /dev/sdf1
Model: ATA KINGSTON SV300S3 (scsi)
Disk /dev/sdh: 120GB
Sector size (logical/physical): 512B/4096B
Partition Table: gpt
Disk Flags:

Number  Start   End     Size    File system  Name           Flags
 8      7517MB  23.6GB  16.1GB               ceph block.db
 9      23.6GB  39.7GB  16.1GB               ceph block.db
10      39.7GB  55.8GB  16.1GB               ceph block.db
11      55.8GB  71.9GB  16.1GB               ceph block.db
12      71.9GB  88.0GB  16.1GB               ceph block.db
13      88.0GB  104GB   16.1GB               ceph block.db
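The six 16.1 GB partitions (numbers 8-13) on the Kingston SSD line up with the six OSDs on this host and with bluestore_block_db_size = 16106127360 from ceph.conf; the apparent size mismatch is only decimal vs binary units. A quick check:

# 16106127360 bytes = 15 GiB (binary units), which parted prints as ~16.1GB (decimal units):
echo $(( 16106127360 / 1024 / 1024 / 1024 ))   # -> 15   (GiB)
echo $(( 16106127360 / 1000 / 1000 / 1000 ))   # -> 16   (~16.1 GB, as shown by parted)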