Ceph cluster deployment (cephadm)

References:

https://docs.ceph.com/en/pacific/cephadm
https://www.cnblogs.com/st2021/p/14970266.html
https://blog.csdn.net/get_set/article/details/108092248

Ceph nodes:

hostname  category  hardware                          eth0 - public    eth1          eth2 - cluster  eth3          gateway
vm-201    ceph-mon  core*1 / 2g / 20GB                192.168.100.201  10.0.100.201  10.0.110.201    10.0.120.201  192.168.100.1
vm-202    ceph-mon  core*1 / 2g / 20GB                192.168.100.202  10.0.100.202  10.0.110.202    10.0.120.202  192.168.100.1
vm-203    ceph-mon  core*1 / 2g / 20GB                192.168.100.203  10.0.100.203  10.0.110.203    10.0.120.203  192.168.100.1
vm-204    ceph-osd  core*4 / 4g / 20GB,10GBx2,30GBx4  192.168.100.204  10.0.100.204  10.0.110.204    10.0.120.204  192.168.100.1
vm-205    ceph-osd  core*4 / 4g / 20GB,10GBx2,30GBx4  192.168.100.205  10.0.100.205  10.0.110.205    10.0.120.205  192.168.100.1
vm-206    ceph-osd  core*4 / 4g / 20GB,10GBx2,30GBx4  192.168.100.206  10.0.100.206  10.0.110.206    10.0.120.206  192.168.100.1

CentOS Linux release 7.9.2009 (Core)

1. SSH passwordless login

ssh-keygen -b 1024 -t rsa -P '' -f ~/.ssh/id_rsa

for i in {202..206}; do ssh-copy-id -i .ssh/id_rsa.pub 192.168.100.$i; done
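
Optional check: key-based login should now work without a password prompt; each node simply prints its hostname:

for i in {202..206}; do ssh -o BatchMode=yes 192.168.100.$i hostname; done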

2. Static name resolution (/etc/hosts)

cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

# admin
192.168.100.210 vm-210

# openstack
192.168.100.211 vm-211
192.168.100.212 vm-212
192.168.100.213 vm-213
192.168.100.214 vm-214
192.168.100.215 vm-215
192.168.100.216 vm-216
192.168.100.217 vm-217
192.168.100.218 vm-218
192.168.100.219 vm-219

# k8s
192.168.100.191 vm-191
192.168.100.192 vm-192
192.168.100.193 vm-193
192.168.100.194 vm-194
192.168.100.195 vm-195
192.168.100.196 vm-196
192.168.100.197 vm-197
192.168.100.207 vm-207
192.168.100.198 vm-198

# ceph
192.168.100.201 vm-201
192.168.100.202 vm-202
192.168.100.203 vm-203
192.168.100.204 vm-204
192.168.100.205 vm-205
192.168.100.206 vm-206
EOF

for i in {202..206}; do scp /etc/hosts vm-$i:/etc; done
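
Optional check: every node should resolve the cluster hostnames from the copied /etc/hosts:

for i in {202..206}; do ssh vm-$i 'getent hosts vm-201 vm-206'; done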

3. docker

cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.nju.edu.cn/docker-ce/linux/centos/\$releasever/\$basearch/stable
enabled=1
gpgcheck=0
gpgkey=https://mirrors.nju.edu.cn/docker-ce/linux/centos/gpg
EOF

yum install -y docker-ce  && systemctl restart docker

cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://registry.docker-cn.com", "http://hub-mirror.c.163.com", "https://docker.mirrors.ustc.edu.cn"],
  "insecure-registries": ["https://192.168.100.198:5000"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

systemctl restart docker  && systemctl enable docker 
docker version && docker info

for i in {202..206}; do scp /etc/yum.repos.d/docker-ce.repo vm-$i:/etc/yum.repos.d; done
for i in {202..206}; do ssh vm-$i 'yum install -y docker-ce  && systemctl restart docker'; done

for i in {202..206}; do scp /etc/docker/daemon.json vm-$i:/etc/docker/; done
for i in {202..206}; do ssh vm-$i 'systemctl enable docker && systemctl restart docker'; done
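
Optional check: the Docker version and the systemd cgroup driver from daemon.json should be reported on every node:

docker info | grep -iE 'server version|cgroup driver'
for i in {202..206}; do ssh vm-$i "docker info | grep -iE 'server version|cgroup driver'"; done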

4. python

pip3 install pip -U -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple

for i in {202..206}; do ssh vm-$i 'pip3 install pip -U -i https://pypi.tuna.tsinghua.edu.cn/simple; pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple'; done

5. time & lvm2

yum install -y chrony lvm2; systemctl enable chronyd && systemctl restart chronyd

for i in {202..206}; do ssh vm-$i 'yum install -y chrony lvm2; systemctl enable chronyd && systemctl restart chronyd'; done
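
Optional check: chrony should be tracking a time source on every node (clock skew otherwise surfaces later as HEALTH_WARN):

for i in {202..206}; do ssh vm-$i 'chronyc tracking | head -2'; done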

6. cephadm

curl -k https://raw.fastgit.org/ceph/ceph/v15.2.15/src/cephadm/cephadm -o /usr/sbin/cephadm
chmod 755 /usr/sbin/cephadm

sed -e 's|quay.io/ceph/ceph:v15|192.168.100.198:5000/ceph/ceph:v15.2.13|g' \
    -e 's|quay.io/prometheus/prometheus:v2.18.1|192.168.100.198:5000/prometheus/prometheus:2.31.1|g' \
    -e 's|quay.io/prometheus/node-exporter:v0.18.1|192.168.100.198:5000/prometheus/node-exporter:1.2.2|g' \
    -e 's|quay.io/prometheus/alertmanager:v0.20.0|192.168.100.198:5000/prometheus/alertmanager:0.23.0|g' \
    -e 's|quay.io/ceph/ceph-grafana:6.7.4|192.168.100.198:5000/ceph/ceph-grafana:6.7.4|g' \
    -i /usr/sbin/cephadm

head -n 10 /usr/sbin/cephadm
#!/usr/bin/python3

# Default container images -----------------------------------------------------
DEFAULT_IMAGE = '192.168.100.198:5000/ceph/ceph:v15.2.13'
DEFAULT_IMAGE_IS_MASTER = False
DEFAULT_PROMETHEUS_IMAGE = '192.168.100.198:5000/prometheus/prometheus:2.31.1'
DEFAULT_NODE_EXPORTER_IMAGE = '192.168.100.198:5000/prometheus/node-exporter:1.2.2'
DEFAULT_ALERT_MANAGER_IMAGE = '192.168.100.198:5000/prometheus/alertmanager:0.23.0'
DEFAULT_GRAFANA_IMAGE = '192.168.100.198:5000/ceph/ceph-grafana:6.7.4'
# ------------------------------------------------------------------------------

for i in {202..203}; do scp /usr/sbin/cephadm vm-$i:/usr/sbin; done
for i in {202..203}; do ssh vm-$i 'chmod 755 /usr/sbin/cephadm'; done
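
Optional check: the rewritten image defaults should be present in every copy of the script:

for i in {202..203}; do ssh vm-$i "grep '^DEFAULT_' /usr/sbin/cephadm"; done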

7. bootstrap

# cephadm bootstrap --mon-ip 192.168.100.201 

Ceph Dashboard is now available at:

	     URL: https://vm-201:8443/
	    User: admin
	Password: fkrf1t3p89

You can access the Ceph CLI with:

	sudo /usr/sbin/cephadm shell --fsid 75600af8-477e-11ec-85f6-f2b532235db6 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

Please consider enabling telemetry to help improve Ceph:

	ceph telemetry on

For more information see:

	https://docs.ceph.com/docs/master/mgr/telemetry/

Bootstrap complete.

# cephadm shell -- ceph -s
Inferring fsid 75600af8-477e-11ec-85f6-f2b532235db6
Inferring config /var/lib/ceph/75600af8-477e-11ec-85f6-f2b532235db6/mon.vm-201/config
Using recent ceph image 192.168.100.198:5000/ceph/ceph@sha256:0368cf225b3a13b7bdeb3d81ecf370a62931ffa5ff87af880d66aebae74f910a
  cluster:
    id:     75600af8-477e-11ec-85f6-f2b532235db6
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum vm-201 (age 8m)
    mgr: vm-201.feujdg(active, since 8m)
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:  

# or: install ceph-common on the host so the ceph CLI can be used directly, without entering the cephadm shell

cephadm add-repo --release octopus
sed -e 's|download.ceph.com|mirrors.nju.edu.cn/ceph|g' -i /etc/yum.repos.d/ceph.repo
cephadm install ceph-common 

ceph -s 

ceph config set global public_network 192.168.100.0/24
ceph config set global cluster_network 10.0.110.0/24
ceph config set global public_network_interface eth0
ceph config set global cluster_network_interface eth2
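
Optional check: read back the values actually stored in the cluster configuration:

ceph config get mon public_network
ceph config get mon cluster_network
ceph config dump | grep -i network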

8. unmanaged

ceph orch apply mon --unmanaged
ceph orch apply mgr --unmanaged
ceph orch apply osd --all-available-devices --unmanaged
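
With the specs marked unmanaged, cephadm records them but schedules no daemons until placements are applied explicitly (steps 10-12); the current specs can be listed with:

ceph orch ls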

9. add hosts

ceph cephadm get-pub-key > ~/ceph.pub
for i in {202..206}; do ssh-copy-id -f -i ~/ceph.pub root@vm-$i; done

ceph orch host add vm-201 192.168.100.201 --labels mon mgr rgw
ceph orch host add vm-202 192.168.100.202 --labels mon mgr rgw
ceph orch host add vm-203 192.168.100.203 --labels mon mgr rgw

ceph orch host add vm-204 192.168.100.204 --labels osd
ceph orch host add vm-205 192.168.100.205 --labels osd
ceph orch host add vm-206 192.168.100.206 --labels osd

# ceph orch host ls
HOST    ADDR             LABELS  STATUS  
vm-201  192.168.100.201  mon             
vm-202  192.168.100.202  mon             
vm-203  192.168.100.203  mon             
vm-204  192.168.100.204  osd             
vm-205  192.168.100.205  osd             
vm-206  192.168.100.206  osd 

10. deploy mon

ceph orch apply mon vm-201,vm-202,vm-203

11. deploy mgr

ceph orch apply mgr vm-201,vm-202,vm-203
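
Optional check: confirm where the mon and mgr daemons were placed:

ceph orch ps --daemon-type mon
ceph orch ps --daemon-type mgr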

12. deploy osd

# ceph orch device ls --wide
Hostname  Path      Type  Transport  RPM      Vendor  Model          Serial       Size   Health   Ident  Fault  Available  Reject Reasons  
vm-204    /dev/sdb  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi6  32.2G  Unknown  N/A    N/A    Yes                        
vm-204    /dev/sdc  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi5  32.2G  Unknown  N/A    N/A    Yes                        
vm-204    /dev/sdd  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi4  32.2G  Unknown  N/A    N/A    Yes                        
vm-204    /dev/sde  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi3  32.2G  Unknown  N/A    N/A    Yes                        
vm-204    /dev/sdf  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi2  10.7G  Unknown  N/A    N/A    Yes                        
vm-204    /dev/sdg  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi1  10.7G  Unknown  N/A    N/A    Yes                        
vm-205    /dev/sdb  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi6  32.2G  Unknown  N/A    N/A    Yes                        
vm-205    /dev/sdc  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi5  32.2G  Unknown  N/A    N/A    Yes                        
vm-205    /dev/sdd  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi4  32.2G  Unknown  N/A    N/A    Yes                        
vm-205    /dev/sde  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi3  32.2G  Unknown  N/A    N/A    Yes                        
vm-205    /dev/sdf  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi2  10.7G  Unknown  N/A    N/A    Yes                        
vm-205    /dev/sdg  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi1  10.7G  Unknown  N/A    N/A    Yes                        
vm-206    /dev/sdb  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi6  32.2G  Unknown  N/A    N/A    Yes                        
vm-206    /dev/sdc  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi5  32.2G  Unknown  N/A    N/A    Yes                        
vm-206    /dev/sdd  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi4  32.2G  Unknown  N/A    N/A    Yes                        
vm-206    /dev/sde  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi3  32.2G  Unknown  N/A    N/A    Yes                        
vm-206    /dev/sdf  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi2  10.7G  Unknown  N/A    N/A    Yes                        
vm-206    /dev/sdg  hdd   Unknown    Unknown  QEMU    QEMU HARDDISK  drive-scsi1  10.7G  Unknown  N/A    N/A    Yes  

cat > /tmp/osds.yaml << EOF
service_type: osd
service_id: osd_using_paths
placement:
  hosts:
    - vm-204
    - vm-205
    - vm-206
spec:
  data_devices:
    paths:
    - /dev/sdb
    - /dev/sdc
    - /dev/sdd
    - /dev/sde
  db_devices:
    paths:
    - /dev/sdf
  wal_devices:
    paths:
    - /dev/sdg
EOF

ceph orch apply -i /tmp/osds.yaml 
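
Optional check: watch the OSD service converge before inspecting the tree:

ceph orch ls osd
ceph orch ps --daemon-type osd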

# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
-1         0.38031  root default                              
-5         0.12677      host vm-204                           
 1    hdd  0.03169          osd.1        up   1.00000  1.00000
 4    hdd  0.03169          osd.4        up   1.00000  1.00000
 7    hdd  0.03169          osd.7        up   1.00000  1.00000
10    hdd  0.03169          osd.10       up   1.00000  1.00000
-3         0.12677      host vm-205                           
 0    hdd  0.03169          osd.0        up   1.00000  1.00000
 3    hdd  0.03169          osd.3        up   1.00000  1.00000
 6    hdd  0.03169          osd.6        up   1.00000  1.00000
 9    hdd  0.03169          osd.9        up   1.00000  1.00000
-7         0.12677      host vm-206                           
 2    hdd  0.03169          osd.2        up   1.00000  1.00000
 5    hdd  0.03169          osd.5        up   1.00000  1.00000
 8    hdd  0.03169          osd.8        up   1.00000  1.00000
11    hdd  0.03169          osd.11       up   1.00000  1.00000

# ceph osd df 
ID  CLASS  WEIGHT   REWEIGHT  SIZE     RAW USE  DATA     OMAP    META      AVAIL    %USE   VAR   PGS  STATUS
 1    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   4 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
 4    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   3 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
 7    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   4 KiB  1024 MiB   29 GiB  10.79  1.00    1      up
10    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   5 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
 0    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   4 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
 3    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   2 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
 6    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   3 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
 9    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   4 KiB  1024 MiB   29 GiB  10.79  1.00    1    down
 2    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   3 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
 5    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   5 KiB  1024 MiB   29 GiB  10.79  1.00    1      up
 8    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   3 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
11    hdd  0.03169   1.00000   32 GiB  3.5 GiB   11 MiB   4 KiB  1024 MiB   29 GiB  10.79  1.00    0      up
                       TOTAL  390 GiB   42 GiB  132 MiB  50 KiB    12 GiB  348 GiB  10.79                   
MIN/MAX VAR: 1.00/1.00  STDDEV: 0

13. deploy rgw

radosgw-admin realm create --rgw-realm=default-realm --default
radosgw-admin zonegroup create --rgw-zonegroup=default-zonegroup --master --default
radosgw-admin zone create --rgw-zonegroup=default-zonegroup --rgw-zone=default-zone --master --default
radosgw-admin period update --rgw-realm=default-realm --commit 

ceph orch apply rgw default-realm default-zone --unmanaged
ceph orch daemon add rgw default-realm default-zone --placement="vm-201 vm-202 vm-203"

# ceph -s
  cluster:
    id:     cc84d9b4-4830-11ec-a506-f2b532235db6
    health: HEALTH_WARN
            1 failed cephadm daemon(s)
 
  services:
    mon: 3 daemons, quorum vm-201,vm-202,vm-203 (age 65m)
    mgr: vm-201.zgeeaz(active, since 94m), standbys: vm-202.tzqyjz, vm-203.tosmgb
    osd: 12 osds: 12 up (since 18m), 12 in (since 19m)
    rgw: 3 daemons active (default-realm.default-zone.vm-201.gbechp, default-realm.default-zone.vm-202.ojodvg, default-realm.default-zone.vm-203.ecllzd)
 
  task status:
 
  data:
    pools:   5 pools, 105 pgs
    objects: 201 objects, 7.4 KiB
    usage:   42 GiB used, 348 GiB / 390 GiB avail
    pgs:     105 active+clean
 
  io:
    client:   43 KiB/s rd, 170 B/s wr, 43 op/s rd, 23 op/s wr
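
With the RGW daemons active, an S3 user can be created for a quick functional test (the uid and display name below are arbitrary examples):

radosgw-admin user create --uid=testuser --display-name="testuser"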

pool (delete)

ceph config set mon mon_allow_pool_delete true

ceph osd pool rm .rgw.root .rgw.root --yes-i-really-really-mean-it
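
Optional check: confirm the pool is gone:

ceph osd pool ls detail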

service (redeploy node-exporter)

ceph orch rm node-exporter
ceph orch apply node-exporter '*'
ceph orch redeploy node-exporter
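
Optional check: one node-exporter daemon should be running per host:

ceph orch ps --daemon-type node-exporter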

* cluster network: no working solution was found.
