Ceph Octopus : Configure Ceph Cluster #2
2021/04/01
Install the Distributed File System Ceph to configure a Storage Cluster.
In this example, a Ceph Cluster is configured with 3 Nodes as follows.
Furthermore, each Storage Node has a free block device to use for Ceph storage. ([/dev/sdb] is used in this example)

            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+
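Before starting, it may help to confirm from the Admin Node that the spare block device really exists on every Node. This is only an optional sketch; it assumes key-based SSH from node01 to the other Nodes is already set up (as in the previous section) and that [/dev/sdb] is the unused disk.

# (optional) confirm the free block device exists on each Node
[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "hostname; lsblk /dev/sdb"
done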
[2] Configure OSD (Object Storage Device) on each Node from the Admin Node. The block devices ([/dev/sdb] in this example) will be formatted for OSD use, so be careful if any existing data is saved on them.
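Because [ceph-volume] will format the device, it may be worth checking first that [/dev/sdb] holds nothing you need. The following is an optional, read-only check; [wipefs -n] only reports existing filesystem or partition signatures and does not erase anything.

# (optional) check for existing signatures on the target device (read-only)
[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "hostname; wipefs -n /dev/sdb; blkid /dev/sdb || echo 'no signature found'"
done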
# if Firewalld is running on each Node, allow ports
[root@node01 ~]# for NODE in node01 node02 node03
do
ssh $NODE "firewall-cmd --add-service=ceph --permanent; firewall-cmd --reload"
done
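To confirm the firewall change took effect, you can list the allowed services on each Node; [ceph] should appear in the output. This assumes Firewalld is actually running; if it is not, the check can simply be skipped.

# (optional) confirm the [ceph] service is allowed on each Node
[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "hostname; firewall-cmd --list-services"
done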
# configure settings for OSD to each Node
[root@node01 ~]# for NODE in node01 node02 node03
do
    # copy the cluster config and keyrings to the other Nodes (node01 already has them)
    if [ "${NODE}" != "node01" ]
    then
        scp /etc/ceph/ceph.conf ${NODE}:/etc/ceph/ceph.conf
        scp /etc/ceph/ceph.client.admin.keyring ${NODE}:/etc/ceph
        scp /var/lib/ceph/bootstrap-osd/ceph.keyring ${NODE}:/var/lib/ceph/bootstrap-osd
    fi
    # fix ownership, create a GPT partition on [/dev/sdb], and set it up as an OSD
    ssh $NODE \
    "chown ceph. /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
    parted --script /dev/sdb 'mklabel gpt'; \
    parted --script /dev/sdb 'mkpart primary 0% 100%'; \
    ceph-volume lvm create --data /dev/sdb1"
done
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4321d2ac-ba79-4947-a19f-65205c08e416
Running command: /usr/sbin/vgcreate --force --yes ceph-42ee91b6-b00c-4f52-a54b-ba03ed897365 /dev/sdb1
 stdout: Physical volume "/dev/sdb1" successfully created.
 stdout: Volume group "ceph-42ee91b6-b00c-4f52-a54b-ba03ed897365" successfully created
Running command: /usr/sbin/lvcreate --yes -l 20479 -n osd-block-4321d2ac-ba79-4947-a19f-65205c08e416 ceph-42ee91b6-b00c-4f52-a54b-ba03ed897365
 stdout: Logical volume "osd-block-4321d2ac-ba79-4947-a19f-65205c08e416" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-42ee91b6-b00c-4f52-a54b-ba03ed897365/osd-block-4321d2ac-ba79-4947-a19f-65205c08e416
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-42ee91b6-b00c-4f52-a54b-ba03ed897365/osd-block-4321d2ac-ba79-4947-a19f-65205c08e416 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
 stderr: got monmap epoch 2
Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQCDEGRgw8DjGhAAw40Vh0+A1rvAL4Hvh06fVQ==
 stdout: creating /var/lib/ceph/osd/ceph-0/keyring
added entity osd.0 auth(key=AQCDEGRgw8DjGhAAw40Vh0+A1rvAL4Hvh06fVQ==)
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 4321d2ac-ba79-4947-a19f-65205c08e416 --setuser ceph --setgroup ceph
 stderr: 2021-03-31T15:02:44.963+0900 7f60425aef40 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
 stderr: 2021-03-31T15:02:45.554+0900 7f60425aef40 -1 freelist read_size_meta_from_db missing size meta in DB
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-42ee91b6-b00c-4f52-a54b-ba03ed897365/osd-block-4321d2ac-ba79-4947-a19f-65205c08e416 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-42ee91b6-b00c-4f52-a54b-ba03ed897365/osd-block-4321d2ac-ba79-4947-a19f-65205c08e416 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-4321d2ac-ba79-4947-a19f-65205c08e416
 stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-4321d2ac-ba79-4947-a19f-65205c08e416.service -> /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
 stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service -> /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@0
--> ceph-volume lvm activate successful for osd ID: 0
--> ceph-volume lvm create successful for: /dev/sdb1
.....
.....

# confirm cluster status
# that's OK if [HEALTH_OK]
[root@node01 ~]# ceph -s
  cluster:
    id:     7def6ab9-42d6-4385-af46-79ba8ccefdcd
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 23m)
    mgr: node01(active, since 21m)
    osd: 3 osds: 3 up (since 4m), 3 in (since 4m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 237 GiB / 240 GiB avail
    pgs:     1 active+clean

# confirm OSD tree
[root@node01 ~]# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
-1         0.23428  root default
-3         0.07809      host node01
 0    hdd  0.07809          osd.0        up   1.00000  1.00000
-5         0.07809      host node02
 1    hdd  0.07809          osd.1        up   1.00000  1.00000
-7         0.07809      host node03
 2    hdd  0.07809          osd.2        up   1.00000  1.00000

[root@node01 ~]# ceph df
--- RAW STORAGE ---
CLASS  SIZE     AVAIL    USED     RAW USED  %RAW USED
hdd    240 GiB  237 GiB  7.6 MiB   3.0 GiB       1.25
TOTAL  240 GiB  237 GiB  7.6 MiB   3.0 GiB       1.25

--- POOLS ---
POOL                   ID  PGS  STORED  OBJECTS  USED  %USED  MAX AVAIL
device_health_metrics   1    1     0 B        0   0 B      0     75 GiB

[root@node01 ~]# ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE    RAW USE  DATA     OMAP  META   AVAIL   %USE  VAR   PGS  STATUS
 0    hdd  0.07809   1.00000  80 GiB  1.0 GiB  2.5 MiB   0 B  1 GiB  79 GiB  1.25  1.00    1      up
 1    hdd  0.07809   1.00000  80 GiB  1.0 GiB  2.5 MiB   0 B  1 GiB  79 GiB  1.25  1.00    1      up
 2    hdd  0.07809   1.00000  80 GiB  1.0 GiB  2.4 MiB   0 B  1 GiB  79 GiB  1.25  1.00    1      up
                       TOTAL  240 GiB  3.0 GiB  7.4 MiB   0 B  3 GiB  237 GiB  1.25
MIN/MAX VAR: 1.00/1.00  STDDEV: 0
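If the cluster does not reach [HEALTH_OK] right away, the OSD daemons on each Node are a good place to look first. The loop below is only an optional sketch; it assumes OSD IDs 0, 1 and 2 were assigned to node01, node02 and node03 in that order, as shown in the [ceph osd tree] output above.

# (optional) confirm the OSD daemon is active on each Node
[root@node01 ~]# ID=0; for NODE in node01 node02 node03
do
    ssh $NODE "hostname; systemctl is-active ceph-osd@${ID}"
    ID=$((ID + 1))
done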