Build a Ceph cluster
Disable firewalld and SELinux on all nodes
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
vim /etc/selinux/config
SELINUX=disabled
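If you prefer a non-interactive change instead of editing the file in vim, a sed one-liner achieves the same thing (a sketch, assuming the file still carries the default SELINUX=enforcing value):
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Confirm the change took effect
grep ^SELINUX= /etc/selinux/config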
Add a domestic (Aliyun) Ceph repository
cat >/etc/yum.repos.d/ceph.repo<<EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
EOF
yum makecache
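Before continuing, it is worth confirming that the new repositories are actually visible to yum (a sketch; the exact package list depends on the mirror):
yum repolist | grep -i ceph
yum list available ceph-deploy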
Run the following on every node
useradd ceph-admin
echo 'ceph-admin' | passwd --stdin ceph-admin
echo "ceph-admin ALL = (root) NOPASSWD:ALL" > /etc/sudoers.d/ceph-admin
chmod 0440 /etc/sudoers.d/ceph-admin
# Allow password authentication for sshd
sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
systemctl reload sshd
# Allow sudo without a tty
sed -i 's/^Defaults.*requiretty/#&/' /etc/sudoers
# Configure /etc/hosts on all nodes
cat >>/etc/hosts<<EOF
192.168.1.111 ceph1
192.168.1.112 ceph2
192.168.1.113 ceph3
EOF
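A quick way to confirm that name resolution and connectivity work between the nodes (a sketch using the hostnames defined above):
for h in ceph1 ceph2 ceph3; do ping -c 1 $h; done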
# Format the disk that will be used for the OSD
mkfs.xfs /dev/sdb
Run the following on the admin node
su - ceph-admin
ssh-keygen
ssh-copy-id ceph-admin@ceph1
ssh-copy-id ceph-admin@ceph2
ssh-copy-id ceph-admin@ceph3
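Each of the following commands should print the remote hostname without asking for a password; if it prompts, the key copy did not succeed (a sketch using the node names above):
for h in ceph1 ceph2 ceph3; do ssh ceph-admin@$h hostname; done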
Install ceph-deploy on the admin node
sudo yum install -y ceph-deploy
mkdir my-cluster
cd my-cluster
Create the new cluster
ceph-deploy new ceph1 ceph2 ceph3
# After this, three files are generated in the my-cluster directory
ceph.conf
ceph-deploy-ceph.log
ceph.mon.keyring
# Edit the ceph.conf configuration file and append the following at the end
sudo vim ~/my-cluster/ceph.conf
public network = 192.168.0.0/23
[osd]
osd_max_object_name_len = 256
osd_max_object_namespace_len = 64
rbd_default_features = 1
osd_pool_default_size = 3
filestore_xattr_use_omap = true
[mon]
mon_pg_warn_max_per_osd = 1000
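Equivalently, the same settings can be appended without an editor, using the heredoc style already used in this guide (a sketch, assuming you are appending to ~/my-cluster/ceph.conf):
cat >> ~/my-cluster/ceph.conf <<EOF
public network = 192.168.0.0/23
[osd]
osd_max_object_name_len = 256
osd_max_object_namespace_len = 64
rbd_default_features = 1
osd_pool_default_size = 3
filestore_xattr_use_omap = true
[mon]
mon_pg_warn_max_per_osd = 1000
EOF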
# From the admin node, install the Ceph packages on every node
ceph-deploy install ceph1 ceph2 ceph3
# Create the initial monitor(s) and gather all keys:
ceph-deploy mon create-initial
# Copy the configuration and keys to every node
ceph-deploy admin ceph1 ceph2 ceph3
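At this point the monitors should have formed a quorum, so a status check is useful before creating OSDs (a sketch; HEALTH_WARN with no OSDs is expected here, and sudo is needed because the admin keyring under /etc/ceph is root-readable):
sudo ceph -s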
Create OSDs on the nodes (run the following for every disk that will become an OSD)
ceph-deploy --overwrite-conf config push ceph{1..3}
ceph-deploy disk zap ceph1 /dev/sdb
ceph-deploy gatherkeys ceph{1..3}
ceph-deploy osd create ceph1 --data /dev/sdb
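If /dev/sdb on each node is to become an OSD, the zap and create steps can be looped over the node names, as a sketch (it assumes the device name is identical on ceph1, ceph2 and ceph3):
for node in ceph1 ceph2 ceph3; do
  ceph-deploy disk zap $node /dev/sdb
  ceph-deploy osd create $node --data /dev/sdb
done
# Confirm the OSDs registered and came up
sudo ceph osd tree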
Start the mds and mgr daemons
ceph-deploy mds create ceph1 ceph2 ceph3
ceph-deploy mgr create ceph1 ceph2 ceph3
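A quick check that the new daemons registered (a sketch; expect one active mgr with standbys, and the mds daemons stay in standby until a filesystem is created):
sudo ceph -s
sudo ceph mds stat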
Enable the dashboard module for the browser-based UI
# Note: because the deployment was done as the regular user ceph-admin, that user cannot read the files under /etc/ceph; change the ownership of everything under /etc/ceph to ceph-admin first
sudo chown -R ceph-admin /etc/ceph
# Then enable the dashboard module
ceph mgr module enable dashboard
# Dashboard management page
http://192.168.1.111:7000
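The dashboard is served by whichever mgr is currently active, so after a failover the URL may point at ceph2 or ceph3 instead; the active endpoint can be looked up (a sketch):
ceph mgr services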
CephFS needs two pools, one for data and one for metadata
ceph osd pool create fs_data 128
ceph osd pool create fs_metadata 128
ceph osd lspools
Create a CephFS filesystem
ceph fs new cephfs fs_metadata fs_data
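To verify that the filesystem exists and an MDS has gone active (a sketch):
ceph fs ls
ceph mds stat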
Mount CephFS from an external client
mount -t ceph ceph1:6789,ceph2:6789,ceph3:6789:/ /mnt/mycephfs -o name=admin,secret=`ceph auth print-key client.admin`
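On the client, the mount point has to exist, and the ceph auth print-key lookup embedded in the mount command needs the ceph CLI plus the admin keyring in /etc/ceph; a minimal preparation sketch (the ceph-common package name is assumed for CentOS 7):
yum install -y ceph-common
mkdir -p /mnt/mycephfs
# After mounting, confirm the filesystem is visible
df -h /mnt/mycephfs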
Uninstall Ceph
ceph-deploy purge ceph1
ceph-deploy purgedata ceph1
rm -rf /var/lib/ceph
rm -rf /etc/ceph
rm -rf /var/run/ceph/
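To tear down every node rather than a single one, the purge commands can be looped from the admin node (a sketch using the node names in this guide; ceph-deploy forgetkeys additionally removes the keyrings gathered locally):
for node in ceph1 ceph2 ceph3; do
  ceph-deploy purge $node
  ceph-deploy purgedata $node
done
ceph-deploy forgetkeys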