1、新建Ceph集群,并生成集群配置文件和密钥文件(在ceph01上执行)
# mkdir /etc/ceph-cluster && cd /etc/ceph-cluster
# ceph-deploy new ceph01 ceph02 ceph03
/*
说明:一个ceph集群至少需要一个monitor才能运行,为提高可用性,一个ceph集群必须依赖多于1个的奇数个的monitor,例如3个或5个来形成仲裁。ceph使用Paxos算法来确保仲裁的一致性。
使用ceph-deploy mon create ceph04 可以增加一个mon节点,但需要先修改配置文件,增加ceph04以及要配置public_network和cluster_network。
*/
2、修改/etc/ceph-cluster/ceph.conf(在ceph01上执行)
# vim /etc/ceph-cluster/ceph.conf
[global]
fsid = 1c372e4e-8329-4393-8c9d-451f75c18f4f
mon_initial_members = ceph01, ceph02, ceph03
mon_host = 192.168.58.200,192.168.58.201,192.168.58.202
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
auth_allow_insecure_global_id_reclaim = false
public_network = 192.168.58.0/24
cluster_network = 172.16.10.0/24
rbd_default_features = 1
[mon.ceph01]
host=ceph01
mon_addr=192.168.58.200
[mon.ceph02]
host=ceph02
mon_addr=192.168.58.201
[mon.ceph03]
host=ceph03
mon_addr=192.168.58.202
创建mon节点(在ceph01上执行)
# cd /etc/ceph-cluster
# ceph-deploy mon create-initial
将keyring同步到各节点,以便其它节点可以执行ceph集群管理命令(在ceph01上执行)
# ceph-deploy --overwrite-conf admin ceph01 ceph02 ceph03 controller compute01 compute02
验证
# ceph -s
如果提示:clock skew detected on mon.xxxx,则需要修改配置mon_clock_drift_allowed = 0.5
# vim /etc/ceph-cluster/ceph.conf
追加
mon_clock_drift_allowed = 0.5
3、假设初始化失败,清除相关目录重新安装(所有主机)
# Wipe all local Ceph state so the cluster can be redeployed from scratch.
# Stop every Ceph daemon first (ceph.target covers mon/mgr/osd units), not
# just ceph-crash — otherwise running daemons keep deleted files open and
# immediately recreate state under /var/lib/ceph.
systemctl stop ceph.target ceph-crash
# Remove configs, daemon data, runtime sockets and logs.
rm -rf /etc/ceph/*
rm -rf /var/lib/ceph/*
rm -rf /var/run/ceph/*
rm -rf /var/log/ceph/*
# Recreate the directory skeleton ceph-deploy expects (-p makes this safe to
# re-run). The daemons run as the 'ceph' user, so ownership must be restored
# as well or mon/osd creation will fail with permission errors.
mkdir -p /var/lib/ceph/mds /var/lib/ceph/osd
chown -R ceph:ceph /var/lib/ceph
4、部署mgr节点(在ceph01节点上执行)
# cd /etc/ceph-cluster
# ceph-deploy mgr create ceph01 ceph02 ceph03
ceph-mgr进程是主备模式,同一时刻只有一个节点工作,其他节点处于standby
验证
# ceph -s
5、部署osd
当BlueStore OSD混合使用传统机械硬盘和固态硬盘时,适当调整RocksDB逻辑卷(block.db)的大小非常重要。对于对象、文件和混合工作负载,Red Hat建议RocksDB逻辑卷不小于数据块设备容量的4%;对于RocksDB与OpenStack块存储工作负载,红帽支持的最小值为BlueStore数据块设备容量的1%。例如,如果数据块设备为1 TB,则至少应创建40 GB的RocksDB逻辑卷。
参考:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/4/html/administration_guide/osd-bluestore
实验环境下:
创建日志分区和数据分区逻辑卷(在ceph01、ceph02、ceph03上执行)
# vim /etc/fstab
把/dev/mapper/centos-home的这行删除
# Reclaim the 'home' LV so its space can back the BlueStore WAL/DB volumes.
# NOTE(review): the /dev/mapper/centos-home line must be removed from
# /etc/fstab before this, as the step above instructs.
umount /dev/centos/home
# -y answers the "Do you really want to remove..." prompt, so the step does
# not hang when executed unattended.
lvremove -y /dev/centos/home
# One 5G WAL + 20G DB logical-volume pair per OSD data disk (sdb/sdc/sdd);
# these are created on every storage node, each in its local 'centos' VG.
lvcreate -L 5G -n lvwal01 centos
lvcreate -L 20G -n lvbdb01 centos
lvcreate -L 5G -n lvwal02 centos
lvcreate -L 20G -n lvbdb02 centos
lvcreate -L 5G -n lvwal03 centos
lvcreate -L 20G -n lvbdb03 centos
指定日志分区和数据分区,存储类型为bluestore(在ceph01上执行)
# Create one BlueStore OSD per data disk on every storage node.
# Each host has its own 'centos' VG, so lvwal0N/lvbdb0N refer to host-local
# LVs. Iteration is disk-major, node-minor — the same order as issuing the
# nine commands by hand — so OSD ids are assigned identically.
idx=0
for disk in sdb sdc sdd; do
  idx=$((idx + 1))
  for node in ceph01 ceph02 ceph03; do
    ceph-deploy osd create "$node" --data "/dev/$disk" \
      --block-wal "centos/lvwal0${idx}" --block-db "centos/lvbdb0${idx}" --bluestore
  done
done
验证:
# ceph -s
# ceph osd tree
6、创建存储池、根据实际OSD数量修改PG数目
用于存放cinder的volume,即创建虚拟机时选择“创建新卷”时数据的存放位置
# ceph osd pool create volumes 128
用于存放镜像
# ceph osd pool create images 32
用于存放备份
# ceph osd pool create backups 32
用于存放vm,即创建虚拟机时选择“不创建新卷”时数据的存放位置
# ceph osd pool create vms 128
查看创建的存储池
# ceph osd pool ls
7、在ceph01上为cinder、glance、cinder-backup用户创建密钥,允许其访问Ceph存储池
创建用户client.cinder,对 volumes 存储池有rwx权限,对vms存储池有rwx权限,对images池有rx权限
# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
创建用户client.glance,对 images 存储池有rwx权限
# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
创建用户client.cinder-backup,对 backups 存储池有rwx权限
# ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups'
将glance的keyring保存到 controller(glance 服务所在节点)上
# ceph auth get-or-create client.glance | ssh controller tee /etc/ceph/ceph.client.glance.keyring
# ssh controller chown glance:glance /etc/ceph/ceph.client.glance.keyring
将 cinder 的keyring保存到(控制节点、计算节点、存储节点 服务所在节点)上
# ceph auth get-or-create client.cinder | ssh controller tee /etc/ceph/ceph.client.cinder.keyring # 还有compute0{1..n}上
# ssh controller chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring # 还有compute0{1..n}上
/*
使用for循环完成
# Push the client.cinder keyring to every node running cinder services and
# hand ownership to the cinder service user so it can read the key.
for node in controller compute01 compute02; do
  ceph auth get-or-create client.cinder | ssh "$node" tee /etc/ceph/ceph.client.cinder.keyring
  ssh "$node" chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
done
*/
将 cinder-backup 的keyring保存到 cinder-backup 服务所在节点上(此处是controller/compute01/compute02)
# ceph auth get-or-create client.cinder-backup | ssh controller tee /etc/ceph/ceph.client.cinder-backup.keyring # 还有compute0{1..n}上
# ssh controller chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring # 还有compute0{1..n}上
/*
使用for循环完成
# Push the client.cinder-backup keyring (owned by the cinder user, which
# runs cinder-backup) plus the bare client.cinder key to every backup node.
for node in controller compute01 compute02; do
  ceph auth get-or-create client.cinder-backup | ssh "$node" tee /etc/ceph/ceph.client.cinder-backup.keyring
  ssh "$node" chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
  ceph auth get-key client.cinder | ssh "$node" tee client.cinder.key
done
*/
/*
不要直接用admin的key,因为不同用户要读到该密钥文件,要修改属组和属主,否则没有权限(当然可以将ceph.client.admin.keyring文件改为775允许cinder/glance用户读取,但不推荐)
# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQCyTYZheH+KKhAAw227TN1qho/8OMhGTyL+UA==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
*/
8、在计算节点上向libvirt添加秘钥
先在计算节点compute01上操作,生成密钥,并保证我们生成的密钥字符串是安全的
# UUID=82e9e6fa-e6d5-42b3-b682-9852aa14e780    # 也可以使用 $(uuidgen) 自动生成
# cd /etc/ceph
# cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>${UUID}</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
查看
# cat /etc/ceph/secret.xml
添加密钥
# virsh secret-define --file secret.xml
# virsh secret-set-value --secret ${UUID} --base64 $(cat /etc/ceph/ceph.client.cinder.keyring | grep key | awk -F ' ' '{ print $3 }')
/*
说明:保存此处生成的UUID的值,后面Cinder以及Nova的配置中需要用到,本示例中的UUID为:82e9e6fa-e6d5-42b3-b682-9852aa14e780
如果添加错误,需要删除,则执行如下命令
# virsh secret-undefine 82e9e6fa-e6d5-42b3-b682-9852aa14e780
*/
查看添加后的密钥
# virsh secret-list
UUID 用量
--------------------------------------------------------------------------------
82e9e6fa-e6d5-42b3-b682-9852aa14e780 ceph client.cinder secret
在其他计算节点操作
1、将secret.xml 拷贝到其他需要定义该libvirt密钥的节点(controller、compute02)
# scp secret.xml controller:/etc/ceph/
# scp secret.xml compute02:/etc/ceph/
在 controller 节点上操作
# UUID=82e9e6fa-e6d5-42b3-b682-9852aa14e780
# cd /etc/ceph
# virsh secret-define --file secret.xml
# virsh secret-set-value --secret ${UUID} --base64 $(cat /etc/ceph/ceph.client.cinder.keyring | grep key | awk -F ' ' '{ print $3 }')
在 compute02 节点上操作
# UUID=82e9e6fa-e6d5-42b3-b682-9852aa14e780
# cd /etc/ceph
# virsh secret-define --file secret.xml
# virsh secret-set-value --secret ${UUID} --base64 $(cat /etc/ceph/ceph.client.cinder.keyring | grep key | awk -F ' ' '{ print $3 }')
验证
# virsh secret-list