#useful checks: "ceph -s" "ceph orch ps" "ceph log last cephadm"
# dashboard on port 8443
# grafana on port 3000
#CEPH ON SINGLE NODE!!!! (cluster configuration at the bottom)


######set variables######
host=$(hostname -s)
ip4=$(ip -o -4 addr list $(ip r | grep "default" | awk '{print $5}') | awk '{print $4}' | cut -d/ -f1)

#network=$(ip r | grep $ip4 | awk '{print $1}')
#--cluster-network $network \


#install the basic packages needed for ceph, a ceph cluster, an iscsi cluster, S3 bucket storage and grafana
#apt-get -y install podman cephadm ceph-common ceph-base lvm2 \
#    ceph-iscsi radosgw nfs-ganesha-ceph cephfs-shell \
#    targetcli-fb python3-rtslib-fb tcmu-runner

#new way: pull cephadm as a standalone binary instead of the distro package
apt-get -y install podman ceph-common ceph-base lvm2 \
    ceph-iscsi radosgw nfs-ganesha-ceph cephfs-shell \
    targetcli-fb python3-rtslib-fb tcmu-runner

curl --silent --remote-name --location https://github.com/ceph/ceph/raw/quincy/src/cephadm/cephadm
chmod +x cephadm
sudo mv cephadm /usr/local/bin/


#deploy ceph + ceph dashboard + grafana
cephadm bootstrap \
    --mon-ip $ip4 \
    --dashboard-password-noupdate \
    --initial-dashboard-user admin \
    --initial-dashboard-password "p@ssw0rd"
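
#a minimal post-bootstrap check, assuming the bootstrap finished without errors:
#cluster health and the daemons the orchestrator started
ceph -s
ceph orch ps
ceph health detail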


#deploy a minimal ceph.conf on ceph nodes when needed. better: copy everything from /etc/ceph to the nodes
ceph config set mgr mgr/cephadm/manage_etc_ceph_ceph_conf true

#disable the standalone nfs server. it only needs to be installed:
#ceph uses it when you create a new nfs service,
#but if nfs-ganesha is still running, the ceph nfs service will not start
systemctl disable --now nfs-ganesha

#enable telemetry and prometheus
#ceph telemetry on --license sharing-1-0
#ceph telemetry enable channel perf
#ceph mgr module enable prometheus


######important settings for single servers######
#change the CRUSH failure domain from host to osd so all replicas can live on one node
ceph osd getcrushmap -o crushmap.cm
crushtool --decompile crushmap.cm -o crushmap.txt
sed -i 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/g' crushmap.txt
crushtool --compile crushmap.txt -o new_crushmap.cm
ceph osd setcrushmap -i new_crushmap.cm
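
#a quick sanity check, assuming the sed matched: the chooseleaf step of the
#replicated rule should now report "type": "osd"
ceph osd crush rule dump | grep -A2 chooseleaf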


######add all free disks######
######repeat this after adding more disks; also works as a cronjob######
######run on the master node: when a host is added it will also pick up all available disks on the other nodes######
ceph orch apply osd --all-available-devices
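
#optional, non-destructive check: list the devices the orchestrator sees
#and the OSDs that were created from them
ceph orch device ls
ceph osd tree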



######allow deleting pools/rbd######
ceph tell mon.* injectargs '--mon-allow-pool-delete=true'

######create cephfs to store normal files######
#requires the $ip4 variable set above
ceph osd pool create cephfs0_data replicated
ceph osd pool create cephfs0_metadata replicated

ceph fs new cephfs0 cephfs0_metadata cephfs0_data
ceph orch apply mds cephfs0 1
ceph fs authorize cephfs0 client.user / rw | tee /etc/ceph/ceph.client.user.keyring
mkdir -p /mnt/cephfs0

cat >> /etc/fstab << FSTABMOUNT
$ip4:/ /mnt/cephfs0 ceph name=user,noatime,nodiratime,_netdev 0 0
FSTABMOUNT

#entry for multiple servers (list the monitor IPs, comma-separated):
#MON_IP1,MON_IP2,MON_IP3:/ /mnt/cephfs0 ceph name=user,noatime,nodiratime,_netdev 0 0

mount -a
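
#a small smoke test, assuming the mount succeeded; the test file name is arbitrary
df -h /mnt/cephfs0
echo "hello cephfs" > /mnt/cephfs0/.mount-test && rm /mnt/cephfs0/.mount-test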


######create an rbd pool and a volume/blockdevice/disk######
ceph osd pool create rbd
ceph osd pool application enable rbd rbd
rbd pool init -p rbd
rbd create daten --size 20G

#show disks
rbd list

#create a usable linux device
rbd map daten --pool rbd --name client.admin

ls -lart /dev/rbd/rbd/*

#format the disk with xfs
sgdisk -n1:0:0 -c 1:"daten" -t 1:8300 /dev/rbd/rbd/daten
mkfs.xfs -fL daten /dev/rbd/rbd/daten-part1

#mount it
mkdir /mnt/daten
mount /dev/rbd/rbd/daten-part1 /mnt/daten

#calculate the used size; expand at 80% usage
rbd diff rbd/daten | awk '{ SUM += $2 } END { print SUM/1024/1024 " MB" }'

#expand the disk (no need to unmount it). NEVER shrink!!!
rbd --pool=rbd --size=51200 resize daten
cfdisk /dev/rbd/rbd/daten
xfs_growfs /mnt/daten
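
#a rough automation sketch for the 80% rule above; assumes the image name
#"daten" and that grep/awk parsing of the rbd JSON output is good enough
used=$(rbd diff rbd/daten | awk '{ SUM += $2 } END { printf "%d", SUM }')
size=$(rbd info rbd/daten --format json | grep -o '"size":[0-9]*' | cut -d: -f2)
if [ $((used * 100 / size)) -ge 80 ]; then
    echo "image over 80% used, consider: rbd --pool=rbd resize daten --size <new>"
fi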

#delete the rbd again
rbd unmap daten
ceph osd pool delete rbd rbd --yes-i-really-really-mean-it


######install gateway for S3 buckets######
#requires the $host variable set above
radosgw-admin realm create --rgw-realm=default --default
radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=default --master --default
radosgw-admin period update --rgw-realm=default --commit
ceph orch apply rgw default --realm=default --zone=default --placement="1 $host"
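
#a follow-up sketch: create a first S3 user so the bucket storage can be tested;
#the uid and display name are arbitrary examples
radosgw-admin user create --uid=testuser --display-name="Test User"
#the output contains the access_key and secret_key for S3 clients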


#---------------------------------------------------------------------------------------------------
#########################################OPTIMIZE##################################################
#---------------------------------------------------------------------------------------------------
######cache tuning######
ceph config set osd osd_memory_target 2G
ceph config set mds mds_cache_memory_limit 2G
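
#optional: confirm the values were accepted
ceph config get osd osd_memory_target
ceph config get mds mds_cache_memory_limit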

######cloudstack integration tuning######
ceph config set mon auth_expose_insecure_global_id_reclaim false
ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false
ceph config set mon auth_allow_insecure_global_id_reclaim false
ceph orch restart mon


######change the number of copies (replicas)######
ceph osd pool set cephfs0_data size 3
ceph osd pool set cephfs0_data min_size 2
ceph osd pool set cephfs0_metadata size 3
ceph osd pool set cephfs0_metadata min_size 2
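
#optional check: the pool listing shows size/min_size per pool
ceph osd pool ls detail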


######optimize pgs######
#ceph osd pool set cephfs0_data pg_num 128
#ceph osd pool set cephfs0_metadata pg_num 256
#ceph osd pool set cephfs0_metadata pg_autoscale_mode on
#ceph osd pool set cephfs0_data pg_autoscale_mode on
#ceph osd pool set cephfs0_data pg_autoscale_mode off
#ceph osd pool set cephfs0_data pg_num XXX


#---------------------------------------------------------------------------------------------------
###################################CLUSTER CONFIGURATION###########################################
#---------------------------------------------------------------------------------------------------

#login on server 1

#!!!change your crushmap config back (failure domain osd -> host)!!!
ceph osd getcrushmap -o crushmap.cm
crushtool --decompile crushmap.cm -o crushmap.txt
sed -i 's/step chooseleaf firstn 0 type osd/step chooseleaf firstn 0 type host/g' crushmap.txt
crushtool --compile crushmap.txt -o new_crushmap.cm
ceph osd setcrushmap -i new_crushmap.cm

#create the user that is used to add hosts without a password prompt
adduser cephcluster --disabled-password --gecos ""
echo 'cephcluster:p@ssw0rd' | chpasswd

echo "cephcluster ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephcluster
chmod 0440 /etc/sudoers.d/cephcluster

#log in as the new user and change the default user that ceph uses to add hosts (instead of root)
su cephcluster
ssh-keygen
ssh-copy-id cephcluster@localhost
ssh-copy-id -f -i /etc/ceph/ceph.pub cephcluster@localhost
sudo ceph cephadm set-user cephcluster
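
#a quick check, assuming passwordless sudo and the keys are in place:
#both commands should return without asking for a password
ssh cephcluster@localhost sudo true
sudo ceph cephadm get-user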

#add all your nodes to the hosts file or set up your DNS server
vi /etc/hosts
#  IP1 NODENAME1
#  IP2 NODENAME2
#  IP3 NODENAME3

#login on server 2
#install the basic ceph packages so the ceph command is available on the node
apt-get -y install ceph-common podman

#create the user, set permissions, generate a key
adduser cephcluster --disabled-password --gecos ""
echo 'cephcluster:p@ssw0rd' | chpasswd

echo "cephcluster ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephcluster
chmod 0440 /etc/sudoers.d/cephcluster

su cephcluster
ssh-keygen
ssh-copy-id cephcluster@localhost

#on each server, set up your hosts file if you do not have a DNS server
sudo vi /etc/hosts
#  IP1 NODENAME1
#  IP2 NODENAME2
#  IP3 NODENAME3

#login on server 1 to copy the config files to the new node
scp /etc/ceph/* cephcluster@NODENAME1:/home/cephcluster/
ssh-copy-id -f -i /etc/ceph/ceph.pub cephcluster@NODENAME1

#login on server 2
#move all config files to /etc/ceph
sudo mv /home/cephcluster/ceph* /etc/ceph/

#login on server 1 and add the new node to the ceph cluster
ceph orch host add NODENAME1 IP1

#let ceph scan for new disks
ceph orch apply osd --all-available-devices
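
#verify the new host joined and its disks became OSDs
ceph orch host ls
ceph osd tree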


######add more mon,mds,mgr######
#build a space-separated host list and a host count, skipping the first /etc/hosts line (usually localhost);
#"anz" is short for the German "Anzahl" (count)
cephhosts=$(awk 'NR > 1 { if (line) line = line " "; line = line $2 } END { print line }' /etc/hosts)
cephhostsanz=$(awk 'NR > 1 {print $2}' /etc/hosts | wc -l)
ceph orch apply mds cephfs0 --placement="$cephhostsanz $cephhosts"

######add more monitors######
awk 'NR > 1 { if (line) line = line ","; line = line $2 } END { print line }' /etc/hosts | xargs -I {} ceph orch apply mon {}

######add more managers######
awk 'NR > 1 { if (line) line = line ","; line = line $2 } END { print line }' /etc/hosts | xargs -I {} ceph orch apply mgr {}
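
#check that the orchestrator picked up the new placements
ceph orch ls
ceph orch ps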

######add more dashboards######
#login on server 2:
ceph mgr module enable dashboard

#enable the balancer
ceph mgr module enable balancer
ceph balancer on
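
#optional: confirm the balancer is active and which mode it uses
ceph balancer status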


#---------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------
######TEST######
#---------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------


######install iscsi gateway (does not work well; the gateway does not always come up)######
#the port must match api_port in iscsi-gateway.cfg below
cat > /etc/ceph/gw1 << GW1CONF
http://admin:admin@$ip4:5001
GW1CONF

cat > /etc/ceph/iscsi-gateway.cfg << IGCONF
[config]
cluster_name = ceph
gateway_keyring = ceph.client.admin.keyring
pool = iscsi
api_secure = false
api_port = 5001
api_user = admin
api_password = admin
trusted_ip_list = $ip4
IGCONF

cat >> /etc/ceph/ceph.conf << CCONF

[osd]
osd heartbeat grace = 20
osd heartbeat interval = 5
osd client watch timeout = 25
CCONF

ceph osd pool create iscsi
rbd pool init iscsi

systemctl daemon-reload

#"blacklist" was renamed to "blocklist" in Pacific; use the old name on older releases
ceph osd blocklist clear

systemctl start rbd-target-gw
systemctl status rbd-target-gw
systemctl enable rbd-target-gw

ceph osd blocklist clear

systemctl start rbd-target-api
systemctl status rbd-target-api
systemctl enable rbd-target-api

#reboot your system

ceph dashboard set-iscsi-api-ssl-verification false
ceph dashboard iscsi-gateway-add -i /etc/ceph/gw1 $host
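
#a hedged check, assuming ceph-iscsi installed its CLI: gwcli shows the
#gateway tree, and the dashboard should list the registered gateway
gwcli ls
ceph dashboard iscsi-gateway-list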

#https://www.redhat.com/sysadmin/ceph-cluster-single-machine
#https://www.virtualtothecore.com/adventures-with-ceph-storage-part-6-mount-ceph-as-a-block-device-on-linux-machines/
#https://blog.nuvotex.de/creating-and-mounting-filesystems-using-cephfs/
#https://balderscape.medium.com/how-to-set-up-samba-to-share-a-ceph-filesystem-with-active-directory-access-control-ee96e172b67b
#https://documentation.suse.com/ses/7/html/ses-all/bp-troubleshooting-monitors.html
#https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_ubuntu/index
#https://www.linux-magazin.de/ausgaben/2016/07/clustered-samba/
#https://docs.ceph.com/en/octopus/cephadm/operations/
#https://blog.kingj.net/2020/02/23/how-to/accessing-cephfs-over-samba/
#https://docs.ceph.com/en/latest/mgr/administrator/
#https://www.youtube.com/watch?v=Qx6lxotzI0k
#https://access.redhat.com/documentation/de-de/red_hat_ceph_storage/5/html/operations_guide/management-of-managers-using-the-ceph-orchestrator