ceph集群搭建

陈德兵 1年前 ⋅ 1287 阅读

示例:

1、vim /etc/hosts

192.168.10.51 ceph1

192.168.10.52 ceph2

192.168.10.53 ceph3


2、ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa

cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys

cat ~/.ssh/id_rsa_ceph2.pub >> ~/.ssh/authorized_keys

cat ~/.ssh/id_rsa_ceph3.pub >> ~/.ssh/authorized_keys

3、sz authorized_keys 

#先用sz下载到本地,再将免认证文件分发到3台机器的 ~/.ssh/ 目录

4、fdisk -l /dev/sdb

parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%

mkfs.xfs -f /dev/sdb

fdisk -s /dev/sdb

blkid -o value -s TYPE /dev/sdb

5、apt install ceph-deploy

mkdir ceph-cluster && cd ceph-cluster

ceph-deploy new ceph1 ceph2 ceph3

6、vim ceph.conf 

osd pool default size = 2

public network = 192.168.10.0/24

cluster network = 192.168.10.0/24

osd pool default pg num = 128

osd pool default pgp num = 128

mon clock drift allowed = 2

mon clock drift warn backoff = 30


7、ceph-deploy install ceph1 ceph2 ceph3

ceph-deploy  mon create-initial

ceph-deploy mgr create ceph1

ceph-deploy gatherkeys ceph1

8、ceph-deploy disk list ceph1 ceph2 ceph3

ceph-deploy disk zap ceph1:/dev/sdb ceph2:/dev/sdb ceph3:/dev/sdb

ceph-deploy osd prepare ceph1:/dev/sdb ceph2:/dev/sdb ceph3:/dev/sdb

ceph-deploy osd activate ceph1:/dev/sdb ceph2:/dev/sdb ceph3:/dev/sdb

ceph-deploy disk list ceph1 ceph2 ceph3

fdisk -l /dev/sdb

ceph-deploy admin ceph1 ceph2 ceph3

chmod 644 /etc/ceph/ceph.client.admin.keyring

ceph health

ceph -s

9、ceph osd pool create rbd 128 128  #创建pool

(ceph osd pool application enable rbd rbd)

(ceph osd pool create pool1 128 128;ceph osd pool application enable pool1 rbd)

#如果pool和pgs为0,执行添加pool

10、ssh-copy-id 192.168.154.129

ceph-deploy install 192.168.154.129

ceph-deploy config push 192.168.154.129

sz ceph.client.admin.keyring 

11、ceph osd pool get rbd size

#可以查看副本数量


12、ceph.client.admin.keyring放到被挂载机器上/etc/ceph

cat /etc/ceph/ceph.client.admin.keyring >> /etc/ceph/keyring

rbd create rbd02 --size 8388608 --image-feature layering --name client.admin #创建设备块(--size 单位为MB)

rbd map --image rbd02 --name client.admin  #映射后自动分配设备名,如 /dev/rbd0

rbd showmapped --name client.admin  #查看已映射的设备

fdisk -l

mkfs.xfs /dev/rbd0

mount /dev/rbd0 /data  #挂载

#开机自动挂载脚本

rbd map --image rbd02 --name client.admin

mount /dev/rbd0 /data

#更新配置文件

ceph-deploy --overwrite-conf config push ceph{1..3}

13、卸载映射:rbd unmap /dev/rbd0

查看详情:rbd info rbd02 查看镜像详情

删除镜像:rbd rm rbd02

查看镜像列表:rbd ls


没源增加阿里源

export CEPH_DEPLOY_REPO_URL=https://mirrors.aliyun.com/ceph/debian-jewel

echo deb https://mirrors.aliyun.com/ceph/debian-jewel/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list

export CEPH_DEPLOY_GPG_URL=https://mirrors.aliyun.com/ceph/keys/release.asc

wget -q -O- 'http://mirrors.aliyun.com/ceph/keys/release.asc' | sudo apt-key add -


补充:

映射到一台机器之后,通过发布nfs服务再挂载到其他机器进行共享同空间


注意:本文归作者所有,未经作者允许,不得转载

全部评论: 0

    我有话说: