root@mgr01# ceph osd dump | grep '^pool.* erasure'
pool 23 'cephfs.data' erasure size 7 min_size 3 crush_rule 2 object_hash rjenkins pg_num 1024 pgp_num 1024 last_change 196579 lfor 196566/196566 flags hashpspool,ec_overwrites,selfmanaged_snaps max_bytes 50000000000000 tiers 25 read_tier 25 write_tier 25 stripe_width 8192 application cephfs
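
Reading the dump: size 7 is the total chunk count k+m from the erasure-code profile shown next (5 data + 2 coding chunks); the ec_overwrites flag is required for CephFS data to live directly on an EC pool; and tiers/read_tier/write_tier 25 indicate a cache tier overlaying this pool. Note that min_size 3 is below k=5, which is unusual; the typical default for an EC pool is k+1 = 6. To inspect the cache-tier pool (pool id 25 here, assuming the same cluster), something like:

root@mgr01# ceph osd dump | grep '^pool 25 '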
root@mgr01# ceph osd erasure-code-profile get cephfs
crush-device-class=hdd
crush-failure-domain=host
crush-root=hdd
jerasure-per-chunk-alignment=false
k=5
m=2
plugin=jerasure
technique=reed_sol_van
w=8
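
For reference, a minimal sketch of commands that could produce an equivalent profile and pool (profile, pool, and bucket names reuse those from the output above; w=8 and jerasure-per-chunk-alignment=false are jerasure defaults, and the cache-tier and CephFS wiring are omitted):

root@mgr01# ceph osd erasure-code-profile set cephfs \
    k=5 m=2 \
    plugin=jerasure technique=reed_sol_van \
    crush-device-class=hdd crush-failure-domain=host crush-root=hdd
root@mgr01# ceph osd pool create cephfs.data 1024 1024 erasure cephfs
root@mgr01# ceph osd pool set cephfs.data allow_ec_overwrites true
root@mgr01# ceph osd pool set-quota cephfs.data max_bytes 50000000000000
root@mgr01# ceph osd pool application enable cephfs.data cephfs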