# 作者:张华 发表于:2024-09-24  (Author: Zhang Hua, published 2024-09-24)
# 版权声明:可以任意转载,转载时请务必以超链接形式标明文章原始出处和作者信息及本版权声明(http://blog.csdn.net/quqi99)
# Install MicroCeph (Reef release) from snap and bootstrap a single-node cluster.
sudo snap install microceph --channel=reef/stable
sudo microceph cluster bootstrap
# Follow the snap's service logs while the cluster comes up.
journalctl -u snap.microceph.* -f
sudo microceph.ceph status
sudo microceph disk list
#sudo microceph disk add loop,4G,3   # loop-file OSD alternative; lvm thinpool - https://microk8s.io/docs/how-to-ceph
# List candidate block devices, filtering out loop/nbd devices and mounted/schroot entries
# (single extended-regexp grep instead of four chained 'grep -v' processes).
lsblk | grep -Ev 'loop|mount|schroot|nbd'
# Carve three thin-provisioned LVM volumes out of one partition to serve as OSD
# backing devices for MicroCeph.
sudo apt install lvm2 -y
# NOTE: an lvm device can be rejected by the lvm.conf filter, e.g.
#   global_filter = [ "a|loop0|", "a|loop23|", "a|/dev/nvme0n1p3|", "r|.*|"]
zfs_block_partition="/dev/nvme0n1p3"
# Cleanup from a previous run:
#sudo lvremove -f /dev/myvg/mylv && sudo vgremove -f myvg && sudo pvremove /dev/nvme0n1p3
sudo vgcreate myvg "$zfs_block_partition" && sudo vgdisplay myvg
#sudo lvcreate -L 100G -n mylv myvg   # for non-thinpool lvm
sudo lvcreate --type thin-pool --name mythinpool --size 100G myvg
# Three 30G thin volumes, one per OSD.
sudo lvcreate --type thin --name osd1 --virtualsize 30G myvg/mythinpool
sudo lvcreate --type thin --name osd2 --virtualsize 30G myvg/mythinpool
sudo lvcreate --type thin --name osd3 --virtualsize 30G myvg/mythinpool
ls /dev/myvg/osd*
# (original had 'done#…' — the missing space before '#' is a syntax error)
for i in 1 2 3; do sudo microceph disk add --wipe "/dev/myvg/osd$i"; done # this workaround doesn't work
# Attempted fix: append an AppArmor rule allowing /dev/myvg/osd[0-9] access to
# the microceph snap profiles, then reload them with apparmor_parser.
# NOTE(review): 'sudo -i' opens an interactive root shell — the commands below
# are meant to be typed inside it, not run from a non-interactive script.
sudo -i
profile=/var/lib/snapd/apparmor/profiles/snap.microceph.daemon
if ! grep -q Cephy "$profile"; then
  sed -i '/loopback control$/a \/dev\/myvg\/osd\[0-9\] rwk,\t\t\t\t\t\t# Cephy' "$profile"
  apparmor_parser -r "$profile"
fi
profile=/var/lib/snapd/apparmor/profiles/snap.microceph.osd
if ! grep -q Cephy "$profile"; then
  sed -i '/loopback control$/a \/dev\/myvg\/osd\[0-9\] rwk,\t\t\t\t\t\t# Cephy' "$profile"
  apparmor_parser -r "$profile"
fi
exit # so we have to use this workaround below instead
# Working workaround: switch AppArmor globally to complain mode so the denied
# /dev/myvg/osd* accesses are only logged, then add the disks as OSDs.
echo -n complain | sudo tee /sys/module/apparmor/parameters/mode
# Restore enforcement afterwards with:
#echo -n enforce | sudo tee /sys/module/apparmor/parameters/mode
# (original line had the for-loop fused into the comment above — it never ran)
for i in 1 2 3; do sudo microceph disk add --wipe "/dev/myvg/osd$i"; done # https://microk8s.io/docs/how-to-ceph
# Single-node tuning: set the replica count to 2, disable manager standby-module
# redirects, and set the CRUSH chooseleaf bucket type to 0 so PGs can peer on one host.
sudo /snap/bin/microceph.ceph config set global osd_pool_default_size 2
sudo /snap/bin/microceph.ceph config set mgr mgr_standby_modules false
# (original had '0# https…' — without the space the value passed would be the
# literal string '0#', not 0)
sudo /snap/bin/microceph.ceph config set osd osd_crush_chooseleaf_type 0 # https://canonical-microceph.readthedocs-hosted.com/_/downloads/en/latest/pdf/?utm_source=canonical-microceph&utm_content=flyout
sudo /snap/bin/microceph.ceph -s
# Shorthand for the snap-namespaced ceph binary. A shell function is used
# instead of an alias because aliases are not expanded in non-interactive
# scripts; callers invoke it exactly the same way.
ceph() { sudo /snap/bin/microceph.ceph "$@"; }
ceph osd pool create block_pool && ceph osd lspools
# NOTE(review): bare 'rbd' assumes ceph client tools are on PATH; with a pure
# snap install this is likely 'sudo /snap/bin/microceph.rbd' — verify.
rbd pool init block_pool
rbd create bd_foo --size 8192 --image-feature layering -p block_pool # create the rbd image
rbd list -p block_pool