
Deploying a Ceph Cluster


Storage basics:

RAID: Redundant Array of Independent Disks

Goals: performance, fault tolerance, capacity

Distributed storage: Ceph

Components

MON: monitor. The MON watches the cluster by maintaining a set of cluster-state maps. Because it holds the cluster state it must not be a single point of failure, so several MONs are deployed; the count should be odd, because when the monitors disagree the decision is made by majority vote.
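Once the cluster is running, the monitor quorum can be checked with standard Ceph commands, for example:

ceph mon stat         # one-line summary of the monitor map and which MONs are in quorum
ceph quorum_status    # JSON view of the current quorum members and election epoch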

OSD: object storage device. The component that actually stores the data. As a rule, every disk that participates in storage gets its own OSD daemon.

MDS: metadata server. Only needed by CephFS.

Metadata: data about data. If a book's content is the data, then its author, publisher and publication date are metadata.

RADOS: Reliable Autonomic Distributed Object Store. It is the foundation of Ceph storage and ensures that everything is stored as objects.

RBD: RADOS Block Device, provides block storage.

CephFS: provides file-system-level storage.

RGW: RADOS Gateway, provides object storage.

 

Storage types (concrete examples follow this list):

Block storage: presents a disk, e.g. iSCSI

File-level storage: shared folders

Object storage: everything is an object
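To make the three access types concrete, here are minimal example commands; the image name, monitor address and mount details are made-up illustrations, not from the original:

rbd create disk01 --size 10240    # block: a 10 GiB RBD image in the default 'rbd' pool
rbd map disk01                    # expose it as a local block device
mount -t ceph 192.168.4.1:6789:/ /mnt/cephfs -o name=admin,secretfile=/etc/ceph/admin.secret    # file: mount CephFS on a client
# object: RGW exposes an S3/Swift-compatible HTTP API, used with clients such as s3cmd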

Preparing the Ceph environment

Prepare six virtual machines

Hostnames and IP addresses

Configure name resolution on the physical host
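A minimal /etc/hosts for name resolution might look like the following; the node1-node3 and node6 addresses come from the transcripts below, while the address for node4 is not shown in the original and is only a placeholder:

192.168.4.1   node1
192.168.4.2   node2
192.168.4.3   node3
192.168.4.4   node4    # placeholder, actual address not shown in the transcript
192.168.4.6   node6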

NTP (Network Time Protocol) runs on UDP port 123 and is used for time synchronization.

Time zones: the Earth spans 360 degrees of longitude, one time zone per 15 degrees, 24 zones in total, referenced to the meridian through Greenwich in the UK. Beijing is in UTC+8 (East 8).
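On RHEL 7 the time zone can be inspected and set with timedatectl, for example:

timedatectl                               # show current time, time zone and NTP status
timedatectl set-timezone Asia/Shanghai    # Beijing time, UTC+8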

Daylight Saving Time (DST): clocks are moved forward in summer.

Stratum: the tier of a time server in the NTP hierarchy.

Time accuracy: atomic clocks.

Add three 10 GB disks to each of node1-node3.

Disks can be added while the virtual machine is running, without shutting it down.
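If the virtual machines run on KVM/libvirt, a disk can be hot-added roughly as follows; the domain name and image path are made-up examples, not taken from the transcript:

qemu-img create -f qcow2 /var/lib/libvirt/images/node1-vdb.qcow2 10G
virsh attach-disk node1 /var/lib/libvirt/images/node1-vdb.qcow2 vdb --subdriver qcow2 --live --persistent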

Installing Ceph

Install the deployment software on node1

Create a working directory for the ceph deployment tool

Create the configuration file that lists the nodes participating in the cluster

Use vdb on node1-node3 as the journal disk. ext (ext3/ext4) and XFS are journaling file systems: a partition is divided into a journal area and a data area. For better performance, vdb is dedicated as the journal disk for vdc and vdd.

 

Create the OSD devices

ceph -s — if the output shows health HEALTH_OK, the cluster is working normally.
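A few related commands (standard Ceph commands, not from the original transcript) help when the status is not HEALTH_OK:

ceph health detail    # explains why the cluster is HEALTH_WARN or HEALTH_ERR
ceph osd tree         # lists every OSD, its host, and whether it is up and in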

[root@room9pc01 cluster]# clone-vm7
Enter VM number: 1
Creating Virtual Machine disk image...... [OK]
Defining new virtual machine...... [OK]
[root@room9pc01 cluster]# clone-vm7
Enter VM number: 2
Creating Virtual Machine disk image...... [OK]
Defining new virtual machine...... [OK]
[root@room9pc01 cluster]# clone-vm7
Enter VM number: 3
Creating Virtual Machine disk image...... [OK]
Defining new virtual machine...... [OK]
[root@room9pc01 cluster]#

Configure eth0 with 192.168.4.1/24:

[root@localhost ~]# nmcli connection modify eth0 ipv4.method manual ipv4.addresses "192.168.4.1/24" connection.autoconnect yes
[root@localhost ~]# nmcli connection up eth0
Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/4)
[root@localhost ~]#

Configure eth0 with 192.168.4.2/24:

[root@localhost ~]# nmcli connection modify eth0 ipv4.method manual ipv4.addresses "192.168.4.2/24" connection.autoconnect yes
[root@localhost ~]# nmcli connection up eth0
Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/4)
[root@localhost ~]#

Configure eth0 with 192.168.4.3/24:

[root@localhost ~]# nmcli connection modify eth0 ipv4.method manual ipv4.addresses "192.168.4.3/24" connection.autoconnect yes
[root@localhost ~]# nmcli connection up eth0
Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/4)
[root@localhost ~]#

[root@node4 ~]# scp /etc/hosts 192.168.4.1:/etc/
root@192.168.4.1's password:
hosts                         100%  369   307.6KB/s   00:00
[root@node4 ~]# scp /etc/hosts 192.168.4.2:/etc/
root@192.168.4.2's password:
hosts                         100%  369   421.1KB/s   00:00
[root@node4 ~]# scp /etc/hosts 192.168.4.3:/etc/
root@192.168.4.3's password:
hosts                         100%  369   452.2KB/s   00:00
[root@node4 ~]#

[root@node4 ~]# cat /etc/yum.repos.d/server.repo
[rhel7.4]
name=rhel7
baseurl=http://192.168.4.254/rhel7
enabled=1
gpgcheck=0
[mon]
name=mon
baseurl=ftp://192.168.4.254/ceph/rhceph-2.0-rhel-7-x86_64/MON
enabled=1
gpgcheck=0
[osd]
name=osd
baseurl=ftp://192.168.4.254/ceph/rhceph-2.0-rhel-7-x86_64/OSD
enabled=1
gpgcheck=0
[tools]
name=tools
baseurl=ftp://192.168.4.254/ceph/rhceph-2.0-rhel-7-x86_64/Tools
enabled=1
gpgcheck=0
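Once the repo file is in place on a node, it can be checked with yum, for example:

yum clean all    # discard cached metadata so the new repos are re-read
yum repolist     # should list the rhel7.4, mon, osd and tools repositories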

[root@node4 ~]# scp /etc/yum.repos.d/server.repo 192.168.4.1:/etc/yum.repos.d/
root@192.168.4.1's password:
server.repo
[root@node4 ~]# scp /etc/yum.repos.d/server.repo 192.168.4.2:/etc/yum.repos.d/
root@192.168.4.2's password:
server.repo                   100%  377    16.0KB/s   00:00
[root@node4 ~]# scp /etc/yum.repos.d/server.repo 192.168.4.3:/etc/yum.repos.d/
root@192.168.4.3's password:
server.repo

Set the hostnames (each command run on the corresponding VM):

[root@localhost ~]# hostnamectl set-hostname node1
[root@localhost ~]# hostnamectl set-hostname node2
[root@localhost ~]# hostnamectl set-hostname node3

On node6, the NTP server, the key lines in /etc/chrony.conf are:

[root@node6 ~]# cat /etc/chrony.conf
allow 192.168.4.0/24
local stratum 10

systemctl restart chronyd

[root@node4 ~]# scp /etc/chrony.conf 192.168.4.1:/etc/
root@192.168.4.1's password:
chrony.conf                   100% 1131   807.7KB/s   00:00
[root@node4 ~]# scp /etc/chrony.conf 192.168.4.2:/etc/
root@192.168.4.2's password:
chrony.conf                   100% 1131   632.6KB/s   00:00
[root@node4 ~]# scp /etc/chrony.conf 192.168.4.3:/etc/
root@192.168.4.3's password:
chrony.conf

On the client nodes, /etc/chrony.conf points at node6, and chronyd is restarted:

server 192.168.4.6 iburst

systemctl restart chronyd
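Time synchronization can then be verified on each node, for example:

chronyc sources -v    # node6 should be listed; '^*' marks the server currently synced to
chronyc tracking      # shows the reference, stratum and current offset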


After the disks have been added, each node shows three new 10 GB disks (node2 and node3 show the same layout as node1):

[root@node1 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   20G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   19G  0 part
  ├─rhel-root   253:0    0   17G  0 lvm  /
  └─rhel-swap   253:1    0    2G  0 lvm  [SWAP]
vdb             252:16   0   10G  0 disk
vdc             252:32   0   10G  0 disk
vdd             252:48   0   10G  0 disk

[root@node1 ~]# yum -y install ceph-deploy
[root@node1 ~]# ceph-deploy --help
[root@node1 ~]# mkdir ceph-cluster
[root@node1 ~]# cd ceph-cluster/
[root@node1 ceph-cluster]#

[root@node1 ceph-cluster]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
/root/.ssh/id_rsa already exists.
Overwrite (y/n) y
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:q/H3V0D9mymSQadfn0A8/Dj94M/IAmurBcDamvB4FoM root@node1
The key's randomart image is:
+---[RSA 2048]----+
(randomart image omitted)
+----[SHA256]-----+

27 ssh-copy-id 192.168.4.1

28 ssh-copy-id 192.168.4.2

29 ssh-copy-id 192.168.4.3

30 ssh-copy-id 192.168.4.6

33 ssh node2

[root@node1 ceph-cluster]# ceph-deploy new node{1..3}
[root@node1 ceph-cluster]# ls
ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring
[root@node1 ceph-cluster]# ceph-deploy install node1 node2 node3
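The command history later jumps from entry 33 to 44, so some steps are not shown here; in a typical ceph-deploy workflow the monitors are bootstrapped at this point, roughly:

ceph-deploy mon create-initial    # create and start the MONs defined by 'ceph-deploy new', then collect the keyrings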

[root@node1 ceph-cluster]# parted /dev/vdb
GNU Parted 3.1
Using /dev/vdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) mklabel gpt
(parted) mkpart primary 1M 50%
(parted) mkpart primary 50% 100%
(parted) print
Model: Virtio Block Device (virtblk)
Disk /dev/vdb: 10.7GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags:

Number  Start   End     Size    File system  Name     Flags
 1      1049kB  5369MB  5368MB               primary
 2      5369MB  10.7GB  5368MB               primary

(parted) quit
Information: You may need to update /etc/fstab.

[root@node1 ceph-cluster]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   20G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   19G  0 part
  ├─rhel-root   253:0    0   17G  0 lvm  /
  └─rhel-swap   253:1    0    2G  0 lvm  [SWAP]
vdb             252:16   0   10G  0 disk
├─vdb1          252:17   0    5G  0 part
└─vdb2          252:18   0    5G  0 part
vdc             252:32   0   10G  0 disk
vdd             252:48   0   10G  0 disk
[root@node1 ceph-cluster]#

[root@node2 ~]# parted /dev/vdb
GNU Parted 3.1
Using /dev/vdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) mklabel gpt
(parted) mkpart primary 1M 50%
(parted) mkpart primary 50% 100%
(parted) print
Model: Virtio Block Device (virtblk)
Disk /dev/vdb: 10.7GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags:

Number  Start   End     Size    File system  Name     Flags
 1      1049kB  5369MB  5368MB               primary
 2      5369MB  10.7GB  5368MB               primary

(parted) quit
Information: You may need to update /etc/fstab.

[root@node2 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   20G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   19G  0 part
  ├─rhel-root   253:0    0   17G  0 lvm  /
  └─rhel-swap   253:1    0    2G  0 lvm  [SWAP]
vdb             252:16   0   10G  0 disk
├─vdb1          252:17   0    5G  0 part
└─vdb2          252:18   0    5G  0 part
vdc             252:32   0   10G  0 disk
vdd             252:48   0   10G  0 disk
[root@node2 ~]#

[root@node3 ~]# parted /dev/vdb
GNU Parted 3.1
Using /dev/vdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) mklabel gpt
(parted) mkpart primary 1M 50%
(parted) mkpart primary 50% 100%
(parted) print
Model: Virtio Block Device (virtblk)
Disk /dev/vdb: 10.7GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags:

Number  Start   End     Size    File system  Name     Flags
 1      1049kB  5369MB  5368MB               primary
 2      5369MB  10.7GB  5368MB               primary

(parted) quit
Information: You may need to update /etc/fstab.

[root@node3 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   20G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   19G  0 part
  ├─rhel-root   253:0    0   17G  0 lvm  /
  └─rhel-swap   253:1    0    2G  0 lvm  [SWAP]
vdb             252:16   0   10G  0 disk
├─vdb1          252:17   0    5G  0 part
└─vdb2          252:18   0    5G  0 part
vdc             252:32   0   10G  0 disk
vdd             252:48   0   10G  0 disk
[root@node3 ~]#

[root@node1 ceph-cluster]# chown ceph.ceph /dev/vdb
[root@node1 ceph-cluster]# chown ceph.ceph /dev/vdb1
[root@node1 ceph-cluster]# chown ceph.ceph /dev/vdb2
[root@node2 ~]# chown ceph.ceph /dev/vdb
[root@node2 ~]# chown ceph.ceph /dev/vdb1
[root@node2 ~]# chown ceph.ceph /dev/vdb2
[root@node3 ~]# chown ceph.ceph /dev/vdb
[root@node3 ~]# chown ceph.ceph /dev/vdb1
[root@node3 ~]# chown ceph.ceph /dev/vdb2
[root@node1 ceph-cluster]#
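Note that ownership changes on /dev nodes do not survive a reboot. One common way to make them persistent is a udev rule; a minimal sketch, with a made-up file name:

# /etc/udev/rules.d/90-ceph-journal.rules (example file name)
KERNEL=="vdb1", OWNER="ceph", GROUP="ceph", MODE="0660"
KERNEL=="vdb2", OWNER="ceph", GROUP="ceph", MODE="0660"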

44 ceph-deploy disk zap node1:vdc node1:vdd

45 ceph-deploy disk zap node2:vdc node2:vdd

46 ceph-deploy disk zap node3:vdc node3:vdd

47 ceph-deploy osd create node1:vdc:/dev/vdb1 node1:vdd:/dev/vdb2

48 ceph-deploy osd create node2:vdc:/dev/vdb1 node2:vdd:/dev/vdb2

49 ceph-deploy osd create node3:vdc:/dev/vdb1 node3:vdd:/dev/vdb2

50 ceph -s

51 ceph osd lspools

52 history
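As a quick functional check after the OSDs are in, a test pool and RBD image can be created; the pool and image names here are made up:

ceph osd pool create testpool 128        # 128 placement groups for a small test pool
rbd create testpool/demo --size 1024     # 1 GiB image in the test pool
rbd ls testpool                          # should list 'demo'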
