Date: Thursday, 2017-05-04. Topic: understanding Ceph. Source: notes taken while reading the book (Ceph Cookbook).
git clone https://github.com/ksingh7/ceph-cookbook.git
cd ceph-cookbook
vagrant up ceph-node1 ceph-node2 ceph-node3 //the Vagrant boxes are hosted overseas; downloading them may require access to foreign sites (a proxy)
vagrant status ceph-node1 ceph-node2 ceph-node3 //the username and password for these boxes are both `vagrant`
ssh-copy-id vagrant@ceph-node2
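ssh-copy-id assumes a key pair already exists on ceph-node1; if not, a minimal sketch to generate one and push it to all peer nodes (default key path assumed):
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa #generate a key pair non-interactively
for node in ceph-node2 ceph-node3; do ssh-copy-id vagrant@$node; done #push the public key to each node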
firewall-cmd --zone=public --add-port=6789/tcp --permanent
firewall-cmd --zone=public --add-port=6800-7100/tcp --permanent
firewall-cmd --reload
firewall-cmd --zone=public --list-all
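These ports must be open on every Ceph node, not just node1. A sketch that replays the same rules over ssh (assumes the vagrant user has passwordless sudo, as Vagrant boxes typically do):
for node in ceph-node2 ceph-node3; do ssh vagrant@$node "sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent; sudo firewall-cmd --zone=public --add-port=6800-7100/tcp --permanent; sudo firewall-cmd --reload"; done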
yum install ntp ntpdate -y
#vim /etc/ntp.conf
#add: server ntp1.aliyun.com //change the server entries to the Aliyun NTP server
ntpq -p
systemctl restart ntpd.service
systemctl enable ntpd.service
systemctl enable ntpdate.service
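ntpd can take a few minutes to synchronize; a quick check (ntpstat ships with the ntp package on CentOS 7):
ntpstat #exits 0 and reports the sync source once the clock is synchronized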
sudo yum install ceph-deploy -y
mkdir -p /etc/ceph && cd /etc/ceph
sudo ceph-deploy new ceph-node1 //create a new Ceph cluster with ceph-node1 as the initial monitor
sudo ceph-deploy install --release jewel --repo-url http://mirrors.ustc.edu.cn/ceph/rpm-jewel/el7 --gpg-url http://mirrors.ustc.edu.cn/ceph/keys/release.asc ceph-node1 ceph-node2 ceph-node3 //install the Ceph binaries on all nodes (jewel release)
ceph-deploy install --release hammer --repo-url http://mirrors.ustc.edu.cn/ceph/rpm-hammer/el7/ --gpg-url http://mirrors.ustc.edu.cn/ceph/keys/release.asc ceph-node1 ceph-node2 ceph-node3 //or the hammer release instead
ceph -v //check the installed Ceph version
sudo ceph-deploy mon create-initial
ceph -s
ceph status //equivalent to ceph -s; at this stage the cluster reports an unhealthy state because no OSDs have been added yet
sudo ceph-deploy disk list ceph-node1 //list all available disks
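To see exactly what the health check is complaining about:
ceph health detail //prints one line per outstanding health issue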
sudo ceph-deploy disk zap ceph-node1:sdb ceph-node1:sdc ceph-node1:sdd //wipe existing partition tables and disk contents
sudo ceph-deploy osd create ceph-node1:sdb ceph-node1:sdc ceph-node1:sdd //prepare the disks and create OSDs on them
Before adding more monitors, set a public network in /etc/ceph/ceph.conf. This is the network over which the nodes (as named in /etc/hosts) reach each other; it need not be an Internet-facing network:
public network = 192.168.1.0/24 //fill in your actual network; e.g. my node1 sits at 192.168.1.101
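The edited ceph.conf must reach every node; ceph-deploy can push it (run from /etc/ceph on ceph-node1):
sudo ceph-deploy --overwrite-conf config push ceph-node1 ceph-node2 ceph-node3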
cd /etc/ceph
sudo ceph-deploy mon create ceph-node2
sudo ceph-deploy mon create ceph-node3
ceph mon stat //show each monitor's name, address, and quorum membership
sudo ceph-deploy disk list ceph-node2 ceph-node3 //list all available disks
sudo ceph-deploy disk zap ceph-node2:sdb ceph-node2:sdc ceph-node2:sdd
sudo ceph-deploy disk zap ceph-node3:sdb ceph-node3:sdc ceph-node3:sdd //wipe existing partition tables and disk contents
sudo ceph-deploy osd create ceph-node2:sdb ceph-node2:sdc ceph-node2:sdd
sudo ceph-deploy osd create ceph-node3:sdb ceph-node3:sdc ceph-node3:sdd //prepare the disks and create OSDs
sudo ceph osd pool set rbd pg_num 256
sudo ceph osd pool set rbd pgp_num 256
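The value 256 follows the usual rule of thumb: total PGs ≈ (OSDs × 100) / replica count, rounded to a power of two. Here, 9 OSDs (3 disks on each of 3 nodes) with the default 3 replicas gives (9 × 100) / 3 = 300, hence 256. Verify with:
sudo ceph osd pool get rbd pg_num
sudo ceph osd pool get rbd pgp_num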
Quorum: the quorum mechanism is a voting algorithm commonly used in distributed systems to guarantee data redundancy and eventual consistency; its mathematical underpinning is the pigeonhole principle.
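For Ceph monitors, quorum means a strict majority: with N mons, floor(N/2) + 1 must agree, so this 3-mon cluster tolerates one mon failure. The current quorum can be inspected with:
ceph quorum_status --format json-pretty //lists the quorum members and the elected leader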
ssh-copy-id ceph@client-node1
sudo ceph-deploy --username ceph install --release jewel --repo-url http://mirrors.ustc.edu.cn/ceph/rpm-jewel/el7 --gpg-url http://mirrors.ustc.edu.cn/ceph/keys/release.asc client-node1
sudo ceph-deploy --username ceph install --release jewel --repo-url http://mirrors.163.com/ceph/rpm-jewel/el7/x86_64/ --gpg-url http://mirrors.163.com/ceph/keys/release.asc client-node1 //alternative: the 163 mirror
sudo ceph-deploy --username ceph config push client-node1
sudo ceph auth get-or-create client.rbd mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=rbd'
sudo ceph auth get-or-create client.rbd | ssh ceph@client-node1 sudo tee /etc/ceph/ceph.client.rbd.keyring
ssh ceph@client-node1
sudo su
cat /etc/ceph/ceph.client.rbd.keyring >> /etc/ceph/keyring
ceph -s --name client.rbd
rbd create rbd1 --size 10240 --image-format 2 --image-feature layering --name client.rbd //the CentOS 7.3 kernel only supports the layering image feature
rbd create rbd1 --size 20480 --name client.rbd //alternative form with default options (fails if rbd1 was already created above)
rbd ls --name client.rbd //the following three commands are equivalent ways to list images in the default pool
rbd ls -p rbd --name client.rbd
rbd list --name client.rbd
rbd --image rbd1 info --name client.rbd
rbd map --image rbd1 --name client.rbd
rbd showmapped --name client.rbd
fdisk -l /dev/rbd0
mkfs.xfs /dev/rbd0
mkdir /mnt/ceph-disk1
mount /dev/rbd0 /mnt/ceph-disk1
df -h /mnt/ceph-disk1
wget https://raw.githubusercontent.com/ksingh7/ceph-cookbook/master/rbdmap -O /etc/init.d/rbdmap
chmod +x /etc/init.d/rbdmap
update-rc.d rbdmap defaults //on Ubuntu
chkconfig rbdmap on //on CentOS
# cat /etc/ceph/keyring shows the key value used below
echo "rbd/rbd1 id=rbd,key=AQCYjBFZX8zLDxAAT7c3Azx/iuZLZh8ZYhs/JQ==" >> /etc/ceph/rbdmap
echo "/dev/rbd0 /mnt/ceph-disk1 xfs defaults,_netdev 0 0" >> /etc/fstab
mkdir /mnt/ceph-disk1
/etc/init.d/rbdmap start
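After the service starts (or after a reboot), confirm the image is mapped and mounted again:
rbd showmapped --name client.rbd
df -h /mnt/ceph-disk1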
To grow an image, use the --size <NEW_SIZE_IN_MB> parameter of rbd resize:
rbd resize --image rbd1 --size 20480 --name client.rbd
rbd info --image rbd1 --name client.rbd
dmesg | grep -i capacity
xfs_growfs -d /mnt/ceph-disk1
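A quick check that the filesystem now reports the grown size:
df -h /mnt/ceph-disk1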
echo "Hello Ceph This is snapshot test" > /mnt/ceph-disk1/snapshot_test_file
ls -l /mnt/ceph-disk1
cat /mnt/ceph-disk1/snapshot_test_file
Syntax: rbd snap create <pool-name>/<image-name>@<snap-name>
rbd snap create rbd/rbd1@snapshot1 --name client.rbd
rbd snap ls rbd/rbd1 --name client.rbd
rm -f /mnt/ceph-disk1/* //delete the test files to simulate data loss
rbd snap rollback rbd/rbd1@snapshot1 --name client.rbd //roll the image back to snapshot1
umount /mnt/ceph-disk1
mount /dev/rbd0 /mnt/ceph-disk1 //remount so the filesystem reflects the rolled-back data
ls -l /mnt/ceph-disk1 //the deleted files are back
rbd snap rm rbd/rbd1@snapshot1 --name client.rbd //delete a single snapshot
rbd snap purge rbd/rbd1 --name client.rbd //delete all snapshots of the image
rbd create rbd2 --size 10240 --image-format 2 --image-feature layering --name client.rbd
rbd info --image rbd2 --name client.rbd
rbd snap create rbd/rbd2@snapshot_for_cloning --name client.rbd
rbd snap protect rbd/rbd2@snapshot_for_cloning --name client.rbd
rbd clone <pool-name>/<parent-image>@<snap-name> <pool-name>/<child-image-name> //syntax
rbd clone rbd/rbd2@snapshot_for_cloning rbd/clone_rbd2 --name client.rbd
rbd info rbd/clone_rbd2 --name client.rbd
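Before flattening or unprotecting, it is useful to see which clones still depend on the snapshot:
rbd children rbd/rbd2@snapshot_for_cloning --name client.rbd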
rbd flatten rbd/clone_rbd2 --name client.rbd //copy all data from the parent so the clone becomes independent
rbd info --image clone_rbd2 --name client.rbd
rbd snap unprotect rbd/rbd2@snapshot_for_cloning --name client.rbd
rbd snap rm rbd/rbd2@snapshot_for_cloning --name client.rbd
Next, set up OpenStack with packstack. First disable firewalld and NetworkManager and fall back to the classic network service:
sudo systemctl disable firewalld
sudo systemctl stop firewalld
sudo systemctl disable NetworkManager
sudo systemctl stop NetworkManager
sudo systemctl enable network
sudo systemctl start network
sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm //enable the RDO repository
sudo yum install -y centos-release-openstack-ocata //or pin the Ocata release via the CentOS extras package
sudo yum update -y
sudo yum install -y openstack-packstack
sudo packstack --allinone
ceph osd pool create images 128
ceph osd pool create volumes 128
ceph osd pool create vms 128
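Verify the three pools exist:
ceph osd lspools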
sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
sudo ceph auth get-or-create client.glance | ssh root@os-node1 sudo tee /etc/ceph/ceph.client.glance.keyring
sudo ceph auth get-or-create client.cinder | ssh root@os-node1 sudo tee /etc/ceph/ceph.client.cinder.keyring
sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
sudo ceph auth get-key client.cinder | ssh root@os-node1 tee /etc/ceph/temp.client.cinder.key
ssh root@os-node1
cd /etc/ceph
ceph -s --name client.cinder --keyring ceph.client.cinder.keyring
ceph -s --name client.glance --keyring ceph.client.glance.keyring
cd /etc/ceph
uuidgen //generate a random UUID; here it produced 4689f4f4-5964-4715-af62-3b6480eb94c1
cat > secret.xml << EOF
<secret ephemeral='no' private='no'>
<uuid>4689f4f4-5964-4715-af62-3b6480eb94c1</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
virsh secret-define --file secret.xml //remove later with: virsh secret-undefine <uuid>
virsh secret-set-value --secret 4689f4f4-5964-4715-af62-3b6480eb94c1 --base64 $(cat temp.client.cinder.key) && rm temp.client.cinder.key secret.xml
virsh secret-list
Edit /etc/glance/glance-api.conf and add the following in the [DEFAULT] section:
default_store = rbd
show_image_direct_url=True
cat /etc/glance/glance-api.conf | egrep -i "default_store|image_direct"
Then add the RBD store settings (on newer Glance releases these live in the [glance_store] section):
stores = rbd
rbd_store_ceph_conf=/etc/ceph/ceph.conf
rbd_store_user=glance
rbd_store_pool=images
rbd_store_chunk_size=8
cat /etc/glance/glance-api.conf | egrep -v "#default" | grep -i rbd
service openstack-glance-api restart
source /root/keystonerc_admin
glance image-list
wget http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img
glance image-create --name cirros_image --disk-format=qcow2 --container-format=bare < cirros-0.3.1-x86_64-disk.img
glance image-list
rados -p images ls --name client.glance --keyring /etc/ceph/ceph.client.glance.keyring | grep -i id //confirm the image objects landed in the Ceph images pool
nova boot --flavor 1 --image e778db95-bad8-41ab-a6f1-4927c9dc87b7 vm1
Edit /etc/cinder/cinder.conf: point the enabled_backends option at an RBD backend, then configure that backend:
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool=volumes
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot=false
rbd_max_clone_depth=5
rbd_store_chunk_size=4
rados_connect_timeout=-1
glance_api_version=2
rbd_user=cinder
rbd_secret_uuid=4689f4f4-5964-4715-af62-3b6480eb94c1 //must match the UUID registered with virsh secret-define above
cat /etc/cinder/cinder.conf | egrep "rbd|rados|version"|grep -v "#"
service openstack-cinder-volume restart
service openstack-cinder-api restart
service openstack-cinder-backup restart
source keystonerc_admin
cinder list
cinder create --display-name ceph-volume1 --display-description "Cinder volume on CEPH storage " 2
cinder list
rados -p volumes --name client.cinder --keyring ceph.client.cinder.keyring ls | grep -i id
Edit /etc/nova/nova.conf and add the following in the libvirt volume options section (nova.virt.libvirt.volume):
rbd_user=cinder
rbd_secret_uuid=4689f4f4-5964-4715-af62-3b6480eb94c1
service openstack-nova-compute restart
nova list
cinder list
nova volume-attach d6ae1f6c-3a67-45ac-a495-0e660bfa4cbe c54ce4df-5a60-462b-9a93-b76f467bfa4d
cinder list
To also place Nova's ephemeral instance disks on Ceph, add the following in nova.conf:
inject_partition = -2
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
cat /etc/nova/nova.conf | egrep "rbd|partition" | grep -v '#'
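As with the earlier edits, restart the compute service so the new settings take effect:
service openstack-nova-compute restart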
qemu-img convert -f qcow2 -O raw cirros-0.3.1-x86_64-disk.img cirros-0.3.1-x86_64-disk.raw //RBD-backed Glance images should be raw so copy-on-write cloning works
glance image-create --name cirros_raw_image --disk-format=raw --container-format=bare < cirros-0.3.1-x86_64-disk.raw
glance image-list
cinder create --image-id 9f175ca3-8e6c-462e-ab3c-5b12dff82f49 --display-name cirros-ceph-boot-volume 1
nova boot --flavor 1 --block-device-mapping vda=29aa621b-5e13-4bbe-9a66-cdab52d41c63::1 --image 9f175ca3-8e6c-462e-ab3c-5b12dff82f49 vm2
To tear down the OpenStack integration, remove the Ceph users and pools:
sudo ceph auth del client.glance
sudo ceph auth del client.cinder
sudo ceph osd pool delete volumes volumes --yes-i-really-really-mean-it
sudo ceph osd pool delete images images --yes-i-really-really-mean-it
sudo ceph osd pool delete vms vms --yes-i-really-really-mean-it
sudo ceph osd pool delete backups backups --yes-i-really-really-mean-it
cd /etc/ceph
ceph-authtool --create-keyring /etc/ceph/ceph.client.radosgw.keyring
chmod +r /etc/ceph/ceph.client.radosgw.keyring
ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.radosgw.gateway --gen-key
ceph-authtool -n client.radosgw.gateway --cap osd 'allow rwx' --cap mon 'allow rwx' /etc/ceph/ceph.client.radosgw.keyring
ceph auth add client.radosgw.gateway -i /etc/ceph/ceph.client.radosgw.keyring
scp /etc/ceph/ceph.client.radosgw.keyring rgw-node1:/etc/ceph/ceph.client.radosgw.keyring
Add a gateway section to /etc/ceph/ceph.conf:
[client.radosgw.gateway]
host = rgw-node1
keyring = /etc/ceph/ceph.client.radosgw.keyring
rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock
log file = /var/log/ceph/client.radosgw.gateway.log
rgw dns name = rgw-node1
rgw print continue = false
sed -i s"/DEFAULT_USER.*=.*'apache'/DEFAULT_USER='root'"/g /etc/rc.d/init.d/ceph-radosgw //run the gateway init script as root instead of apache
ceph -s -k /etc/ceph/ceph.client.radosgw.keyring --name client.radosgw.gateway
radosgw-admin user create --uid="testuser" --display-name="First User"
radosgw-admin subuser create --uid=testuser --subuser=testuser:swift --access=full
radosgw-admin key create --subuser=testuser:swift --key-type=swift --gen-secret
sudo apt-get install python-setuptools
sudo easy_install pip
sudo pip install --upgrade setuptools
sudo pip install --upgrade python-swiftclient
swift -A http://192.168.1.106:7480/auth/1.0 -U testuser:swift -K EQetFsHWwz5AgIQPxVeshKBFjqTdgSdXcSEOVGrP list
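The same credentials can exercise the full object workflow; a small sketch (the container name my-container is arbitrary). The endpoint IDs deleted below come from openstack endpoint list:
swift -A http://192.168.1.106:7480/auth/1.0 -U testuser:swift -K EQetFsHWwz5AgIQPxVeshKBFjqTdgSdXcSEOVGrP post my-container //create a container
swift -A http://192.168.1.106:7480/auth/1.0 -U testuser:swift -K EQetFsHWwz5AgIQPxVeshKBFjqTdgSdXcSEOVGrP list //my-container should now appear
openstack endpoint list | grep -i swift //look up the old Swift endpoint IDs deleted in the next step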
openstack endpoint delete 5f4ebe3e6e9a49579a62990f3a9f880b
openstack endpoint delete 6825c8ebfc6245d4b2bab958ca6ca72e
openstack endpoint delete 6983844d8c87442eb21f0f1c6d9b1612
openstack endpoint create --region RegionOne swift public http://192.168.1.106:7480/swift/v1
openstack endpoint create --region RegionOne swift internal http://192.168.1.106:7480/swift/v1
openstack endpoint create --region RegionOne swift admin http://192.168.1.106:7480/swift/v1
cat /etc/keystone/keystone.conf | grep -i admin_token
admin_token = a3a1eed3fa424ad3b58fe9d554f6c15c
mkdir -p /var/ceph/nss
openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | certutil -d /var/ceph/nss -A -n ca -t "TCu,Cu,Tuw"
openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
certutil -A -d /var/ceph/nss -n signing_cert -t "P,P,P"
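These NSS certs are consumed by the gateway once Keystone integration is enabled; a sketch of the matching ceph.conf options (pre-Luminous option names; the Keystone host and accepted roles are assumptions to adapt):
[client.radosgw.gateway]
rgw keystone url = http://<keystone-host>:35357
rgw keystone admin token = a3a1eed3fa424ad3b58fe9d554f6c15c
rgw keystone accepted roles = admin, _member_
nss db path = /var/ceph/nss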