- #health checks: "ceph -s", "ceph orch ps", "ceph log last cephadm"
- # dashboard on port 8443
- # grafana on port 3000
- #CEPH ON A SINGLE NODE!!!! (cluster configuration at the bottom)
- ######set variables######
- host=`hostname -s`
- ip4=`ip -o -4 addr list $(ip r|grep "default"|awk '{print $5}') | awk '{print $4}' | cut -d/ -f1`
- #network=`ip r|grep $ip4|awk '{print $1}'`
- #--cluster-network $network \
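- #optional sanity check (a minimal sketch): abort early if host/ip detection failed
- [ -n "$host" ] && [ -n "$ip4" ] || { echo "host/ip detection failed"; exit 1; }
- echo "host=$host ip4=$ip4"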
- #Install the basic packages needed for ceph, a ceph cluster, an iscsi gateway, s3 bucket storage and grafana
- #apt-get -y install podman cephadm ceph-common ceph-base lvm2 \
- # ceph-iscsi radosgw nfs-ganesha-ceph cephfs-shell \
- # targetcli-fb ceph-iscsi python3-rtslib-fb tcmu-runner
- #new way: fetch cephadm as a standalone binary
- apt-get -y install podman ceph-common ceph-base lvm2 \
- ceph-iscsi radosgw nfs-ganesha-ceph cephfs-shell \
- targetcli-fb ceph-iscsi python3-rtslib-fb tcmu-runner
- curl --silent --remote-name --location https://github.com/ceph/ceph/raw/quincy/src/cephadm/cephadm
- chmod +x cephadm
- sudo mv cephadm /usr/local/bin/
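- #quick check that the binary works (it may pull the default ceph container image on first run)
- cephadm version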
- #deploy ceph + ceph dashboard + grafana
- cephadm bootstrap \
- --mon-ip $ip4 \
- --dashboard-password-noupdate \
- --initial-dashboard-user admin \
- --initial-dashboard-password "p@ssw0rd"
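- #after bootstrap, verify the cluster and the dashboard/grafana containers came up
- ceph -s
- ceph orch ps
- ceph mgr services   #prints URLs of enabled mgr services such as the dashboard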
- #deploy a minimal ceph.conf to the other ceph nodes when needed; better: copy everything from /etc/ceph to the nodes
- ceph config set mgr mgr/cephadm/manage_etc_ceph_ceph_conf true
- #disable the standalone nfs server; it only needs to be installed.
- #ceph uses it when you create a new nfs service,
- #but if the system nfs-ganesha is still running, the ceph nfs service will not start
- systemctl disable nfs-ganesha
- #enable telemetry and prometheus
- #ceph telemetry on --license sharing-1-0
- #ceph telemetry enable channel perf
- #ceph mgr module enable prometheus
- ######important settings for single servers######
- ceph osd getcrushmap -o crushmap.cm
- crushtool --decompile crushmap.cm -o crushmap.txt
- sed -i s/"step chooseleaf firstn 0 type host"/"step chooseleaf firstn 0 type osd"/g crushmap.txt
- crushtool --compile crushmap.txt -o new_crushmap.cm
- ceph osd setcrushmap -i new_crushmap.cm
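- #optional check: the default replicated rule should now choose leaves of type "osd"
- ceph osd crush rule dump replicated_rule | grep -A2 chooseleaf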
- ######add all free disks######
- ######repeat this after adding more disks. also works as a cronjob######
- ######on the master node this will also pick up all available disks on the other nodes once they are added as hosts######
- ceph orch apply osd --all-available-devices
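- #verify that OSDs were created (it can take a minute until the devices are consumed)
- ceph orch device ls
- ceph osd tree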
- ######allow delete pools/rbd######
- ceph tell mon.* injectargs '--mon-allow-pool-delete true'
- ######create cephfs for store normal files######
- #requires the $ip4 variable set above
- ceph osd pool create cephfs0_data replicated
- ceph osd pool create cephfs0_metadata replicated
- ceph fs new cephfs0 cephfs0_metadata cephfs0_data
- ceph orch apply mds cephfs0 1
- ceph fs authorize cephfs0 client.user / rw | tee /etc/ceph/ceph.client.user.keyring
- mkdir -p /mnt/cephfs0
- cat >> /etc/fstab << FSTABMOUNT
- $ip4:/ /mnt/cephfs0 ceph name=user,noatime,nodiratime,_netdev 0 0
- FSTABMOUNT
- #entry for multiple servers (list all monitor addresses):
- #$ip4,$ip4,$ip4:/ /mnt/cephfs0 ceph name=user,noatime,nodiratime,_netdev 0 0
- mount -a
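- #optional smoke test of the new filesystem and mount
- ceph fs status cephfs0
- touch /mnt/cephfs0/.write_test && rm /mnt/cephfs0/.write_test
- df -h /mnt/cephfs0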
- ######create rbd pool and volume/blockdevice/disk######
- ceph osd pool create rbd
- ceph osd pool application enable rbd rbd
- rbd pool init -p rbd
- rbd create daten --size 20G
- #show disks
- rbd list
- #map it to a usable linux block device
- rbd map daten --pool rbd --name client.admin
- ls -lart /dev/rbd/rbd/*
- #format the disk with xfs
- sgdisk -n1:0:0 -c 1:"daten" -t 1:8300 /dev/rbd/rbd/daten
- mkfs.xfs -fL daten /dev/rbd/rbd/daten-part1
- #mount it
- mkdir /mnt/daten
- mount /dev/rbd/rbd/daten-part1 /mnt/daten
- #calculate the used size; expand at 80% usage
- rbd diff rbd/daten | awk '{ SUM += $2 } END { print SUM/1024/1024 " MB" }'
- #expand the disk (no need to unmount it). NEVER shrink!!!
- rbd --pool=rbd --size=51200 resize daten
- cfdisk /dev/rbd/rbd/daten
- xfs_growfs /mnt/daten
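- #sketch of a usage check that could run as a cronjob, following the 80% rule above;
- #the threshold and the paths are just the example values used in this guide
- used=$(df --output=pcent /mnt/daten | tail -1 | tr -dc '0-9')
- [ "$used" -ge 80 ] && echo "rbd/daten is ${used}% full - run rbd resize + xfs_growfs"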
- #delete rbd (unmount and unmap first)
- umount /mnt/daten
- rbd unmap daten
- ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
- ######install gateway for S3 buckets######
- #requires the $ip4 and $host variables set above
- radosgw-admin realm create --rgw-realm=default --default
- radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
- radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=default --master --default
- radosgw-admin period update --rgw-realm=default --commit
- ceph orch apply rgw default --realm=default --zone=default --placement="1 $host"
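- #create an S3 user and note the generated access/secret keys (the uid and name are just examples)
- radosgw-admin user create --uid=s3user --display-name="S3 user"
- #with this placement the gateway listens on port 80 by default
- curl http://$host:80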
- ---------------------------------------------------------------------------------------------------
- #########################################OPTIMIZE##################################################
- ---------------------------------------------------------------------------------------------------
- ######cache tuning######
- ceph config set osd osd_memory_target 2G
- ceph config set mds mds_cache_memory_limit 2G
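- #verify the values took effect
- ceph config get osd osd_memory_target
- ceph config get mds mds_cache_memory_limit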
- ######cloudstack integration tuning######
- ceph config set mon auth_expose_insecure_global_id_reclaim false
- ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false
- ceph config set mon auth_allow_insecure_global_id_reclaim false
- ceph orch restart mon
- ######change copies######
- ceph osd pool set cephfs0_data size 3
- ceph osd pool set cephfs0_data min_size 2
- ceph osd pool set cephfs0_metadata size 3
- ceph osd pool set cephfs0_metadata min_size 2
- ######optimize pgs######
- #ceph osd pool set cephfs0_data pg_num 128
- #ceph osd pool set cephfs0_metadata pg_num 256
- #ceph osd pool set cephfs0_metadata pg_autoscale_mode on
- #ceph osd pool set cephfs0_data pg_autoscale_mode on
- #ceph osd pool set cephfs0_data pg_autoscale_mode off
- #ceph osd pool set cephfs0_data pg_num XXX
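- #check the current pg numbers and what the autoscaler recommends before changing anything
- ceph osd pool autoscale-status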
- ---------------------------------------------------------------------------------------------------
- ###################################CLUSTER CONFIGURATION###########################################
- ---------------------------------------------------------------------------------------------------
- #login on server 1
- #!!!change back your crushmap config!!!
- ceph osd getcrushmap -o crushmap.cm
- crushtool --decompile crushmap.cm -o crushmap.txt
- sed -i s/"step chooseleaf firstn 0 type osd"/"step chooseleaf firstn 0 type host"/g crushmap.txt
- crushtool --compile crushmap.txt -o new_crushmap.cm
- ceph osd setcrushmap -i new_crushmap.cm
- #create the user that is used to add hosts without a password
- adduser cephcluster --disabled-password --gecos ""
- echo 'cephcluster:p@ssw0rd' | chpasswd
- echo "cephcluster ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephcluster
- chmod 0440 /etc/sudoers.d/cephcluster
- #log in as the new user and change the default user that cephadm uses to add hosts (instead of root)
- su cephcluster
- ssh-keygen
- ssh-copy-id cephcluster@localhost
- ssh-copy-id -f -i /etc/ceph/ceph.pub cephcluster@localhost
- sudo ceph cephadm set-user cephcluster
- #add all your nodes to the hosts file or set up your DNS server
- vi /etc/hosts
- IP1 NODENAME1
- IP2 NODENAME2
- IP3 NODENAME3
- #login on server 2
- #install the basic ceph packages to make the ceph command available on the node
- apt-get install ceph-common podman
- #create user, set permissions, generate key
- adduser cephcluster --disabled-password --gecos ""
- echo 'cephcluster:p@ssw0rd' | chpasswd
- echo "cephcluster ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephcluster
- chmod 0440 /etc/sudoers.d/cephcluster
- su cephcluster
- ssh-keygen
- ssh-copy-id cephcluster@localhost
- #on each server set up your hosts file if you do not have a DNS server
- sudo vi /etc/hosts
- IP1 NODENAME1
- IP2 NODENAME2
- IP3 NODENAME3
- #log in to server 1 to copy the config files to the new node
- scp /etc/ceph/* cephcluster@NODENAME1:/home/cephcluster/
- ssh-copy-id -f -i /etc/ceph/ceph.pub cephcluster@NODENAME1
- #login to server 2
- #move all config files to /etc/ceph
- sudo mv /home/cephcluster/ceph* /etc/ceph/
- #log in on server 1 and add the new node to the ceph cluster
- ceph orch host add NODENAME1 IP1
- #let ceph scan for new disks
- ceph orch apply osd --all-available-devices
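- #verify that the new host and its OSDs joined the cluster
- ceph orch host ls
- ceph osd tree
- ceph -s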
- ######add more mds (mon and mgr follow below)######
- #build a space-separated host list and a host count from /etc/hosts (skips the first line, e.g. localhost)
- cephhosts=`cat /etc/hosts | awk 'NR > 1 { if (line) line = line " "; line = line $2 } END { print line }'`
- cephhostsanz=`cat /etc/hosts | awk 'NR > 1 {print $2}'|wc -l`
- ceph orch apply mds cephfs0 --placement="$cephhostsanz $cephhosts"
- ######add more monitor######
- cat /etc/hosts | awk 'NR > 1 { if (line) line = line ","; line = line $2 } END { print line }' | xargs -I {} ceph orch apply mon {}
- ######add more manager######
- cat /etc/hosts | awk 'NR > 1 { if (line) line = line ","; line = line $2 } END { print line }' | xargs -I {} ceph orch apply mgr {}
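- #check that the additional daemons were placed
- ceph orch ls
- ceph mon stat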
- ######add more dashboard######
- #login on server 2:
- ceph mgr module enable dashboard
- #enable balancer
- ceph mgr module enable balancer
- ceph balancer on
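- #verify
- ceph balancer status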
- ---------------------------------------------------------------------------------------------------
- ---------------------------------------------------------------------------------------------------
- ######TEST######
- ---------------------------------------------------------------------------------------------------
- ---------------------------------------------------------------------------------------------------
- ######install iscsi gateway (does not work well; the gateway does not always come up)######
- cat > /etc/ceph/gw1 << GW1CONF
- http://admin:admin@$ip4:5000
- GW1CONF
- cat > /etc/ceph/iscsi-gateway.cfg << IGCONF
- [config]
- cluster_name = ceph
- gateway_keyring = ceph.client.admin.keyring
- pool = iscsi
- api_secure = false
- api_port = 5001
- api_user = admin
- api_password = admin
- trusted_ip_list = $ip4
- IGCONF
- cat >> /etc/ceph/ceph.conf << CCONF
- [osd]
- osd heartbeat grace = 20
- osd heartbeat interval = 5
- osd client watch timeout = 25
- CCONF
- ceph osd pool create iscsi
- rbd pool init iscsi
- systemctl daemon-reload
- #on Pacific and newer releases this is "ceph osd blocklist clear"
- ceph osd blacklist clear
- systemctl start rbd-target-gw
- systemctl status rbd-target-gw
- systemctl enable rbd-target-gw
- ceph osd blacklist clear
- systemctl start rbd-target-api
- systemctl status rbd-target-api
- systemctl enable rbd-target-api
- #reboot your system
- ceph dashboard set-iscsi-api-ssl-verification false
- ceph dashboard iscsi-gateway-add -i /etc/ceph/gw1 $host
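- #once the API is reachable, the gateway, targets and disks can be inspected with gwcli
- gwcli ls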
- #https://www.redhat.com/sysadmin/ceph-cluster-single-machine
- #https://www.virtualtothecore.com/adventures-with-ceph-storage-part-6-mount-ceph-as-a-block-device-on-linux-machines/
- #https://blog.nuvotex.de/creating-and-mounting-filesystems-using-cephfs/
- #https://balderscape.medium.com/how-to-set-up-samba-to-share-a-ceph-filesystem-with-active-directory-access-control-ee96e172b67b
- #https://documentation.suse.com/ses/7/html/ses-all/bp-troubleshooting-monitors.html
- #https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_ubuntu/index
- #https://www.linux-magazin.de/ausgaben/2016/07/clustered-samba/
- #https://docs.ceph.com/en/octopus/cephadm/operations/
- #https://blog.kingj.net/2020/02/23/how-to/accessing-cephfs-over-samba/
- #https://docs.ceph.com/en/latest/mgr/administrator/
- #https://www.youtube.com/watch?v=Qx6lxotzI0k
- #https://access.redhat.com/documentation/de-de/red_hat_ceph_storage/5/html/operations_guide/management-of-managers-using-the-ceph-orchestrator