Advertisement
Guest User

ceph.conf

a guest
Jul 17th, 2014
239
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 1.60 KB | None | 0 0
  1. [global]
  2. auth client required = cephx
  3. auth cluster required = cephx
  4. auth service required = cephx
  5. auth supported = cephx
  6. cluster network = 172.28.10.0/24
  7. filestore xattr use omap = true
  8. fsid = dcd915d2-daff-4974-9910-970166a1aeaf
  9. keyring = /etc/pve/priv/$cluster.$name.keyring
  10. osd journal size = 5120
  11. osd pool default min size = 1
  12. public network = 172.28.10.0/24
  13. osd crush update on start = false
  14.  
  15. [client]
  16. rbd cache = true
  17. rbd cache size = 1073741824
  18. rbd cache max dirty = 536870912
  19. rbd cache max dirty age = 100
  20.  
  21. [osd]
  22. keyring = /var/lib/ceph/osd/ceph-$id/keyring
  23. osd recovery max active = 1
  24. osd max backfills = 1
  25. osd mkfs options xfs = "-f -i size=2048"
  26. osd mount options xfs = "rw,noatime,nobarrier,logbsize=256k,logbufs=8,inode64,allocsize=4M,discard"
  27. #osd mount options xfs = "rw,noatime,nobarrier,logbufs=8,inode64,discard"
  28. osd op threads = 4
  29. # Test
  30. osd disk threads = 2
  31. journal dio = true
  32. journal aio = true
  33. journal max write bytes = 1 GiB
  34. journal max write entries = 50000
  35. journal queue max bytes = 1 GiB
  36. journal queue max ops = 50000
  37.  
  38. filestore op threads = 6
  39. filestore queue max ops = 4096
  40. filestore queue max bytes = 16 MiB
  41. filestore queue committing max ops = 4096
  42. filestore queue committing max bytes = 16 MiB
  43. filestore min sync interval = 15
  44. filestore max sync interval = 15
  45. filestore fd cache size = 10240
  46. filestore journal parallel = true
  47.  
  48. [mon.0]
  49. host = nodo3
  50. mon addr = 172.28.10.203:6789
  51.  
  52. [mon.1]
  53. host = nodo1
  54. mon addr = 172.28.10.201:6789
  55.  
  56. [mon.2]
  57. host = nodo2
  58. mon addr = 172.28.10.202:6789
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement