# Handling the OSD Alarm in the Dashboard
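
The OSD alarm in the dashboard is caused by osd.3, which is still registered in the cluster but permanently down and out, as the `ceph status` and `ceph osd tree` output below shows. All commands are run from a Ceph toolbox shell. If the cluster is managed by Rook (the `replicapool` and `myfs-*` pool names suggest the Rook examples), a shell can be opened with something like the following; the namespace and deployment names are Rook's defaults and are assumptions here:

```bash
# Open a shell in the Rook toolbox pod.
# "rook-ceph" and "rook-ceph-tools" are Rook's default names; adjust to your cluster.
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash
```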

```
bash-4.4$ ceph status
  cluster:
    id:     d874b4ea-8deb-4aa3-a3ac-e750180a6a5b
    health: HEALTH_WARN
            mon b is low on available space

  services:
    mon: 3 daemons, quorum a,b,c (age 4M)
    mgr: b(active, since 4M), standbys: a
    mds: 1/1 daemons up, 1 hot standby
    osd: 4 osds: 3 up (since 4M), 3 in (since 13M)

  data:
    volumes: 1/1 healthy
    pools:   5 pools, 113 pgs
    objects: 20.17k objects, 871 MiB
    usage:   7.4 GiB used, 83 GiB / 90 GiB avail
    pgs:     113 active+clean

  io:
    client: 853 B/s rd, 2 op/s rd, 0 op/s wr
```
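
`ceph status` only summarizes the health state. To see the full text of every active health check (the mon disk-space warning here, plus anything raised for the down OSD), the detail view can be queried:

```bash
# Expand HEALTH_WARN into the individual health checks and their messages.
ceph health detail
```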

```
bash-4.4$ ceph df
--- RAW STORAGE ---
CLASS     SIZE   AVAIL     USED  RAW USED  %RAW USED
ssd     90 GiB  83 GiB  7.4 GiB   7.4 GiB       8.22
TOTAL   90 GiB  83 GiB  7.4 GiB   7.4 GiB       8.22

--- POOLS ---
POOL             ID  PGS   STORED  OBJECTS      USED  %USED  MAX AVAIL
.mgr              1    1  449 KiB        2   1.3 MiB      0     26 GiB
replicapool       3   32  338 MiB      126  1013 MiB   1.25     26 GiB
myfs-metadata     4   16  209 MiB      390   628 MiB   0.78     26 GiB
myfs-replicated   5   32    158 B    3.00k    12 KiB      0     26 GiB
myfs-data0        6   32  286 MiB   16.65k   983 MiB   1.21     26 GiB
```
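
`ceph df` breaks usage down per pool. A per-OSD view (size, usage, PG count, and up/down state of each OSD) is also useful here to confirm that only the three running OSDs hold any data:

```bash
# Per-OSD utilization and PG counts.
ceph osd df
```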

```
bash-4.4$ ceph osd status
ID  HOST            USED  AVAIL  WR OPS  WR DATA  RD OPS  RD DATA  STATE
 0  k8s-worker-01  2515M  27.5G       0        0       1       90  exists,up
 1  k8s-worker-02  2511M  27.5G       0        0       0        0  exists,up
 2  k8s-worker-03  2544M  27.5G       0        0       1       16  exists,up
 3                     0      0       0        0       0        0  autoout,exists,new
```

```
bash-4.4$ ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME               STATUS  REWEIGHT  PRI-AFF
-1         0.08789  root default
-3         0.02930      host k8s-worker-01
 0    ssd  0.02930          osd.0               up   1.00000  1.00000
-5         0.02930      host k8s-worker-02
 1    ssd  0.02930          osd.1               up   1.00000  1.00000
-7         0.02930      host k8s-worker-03
 2    ssd  0.02930          osd.2               up   1.00000  1.00000
 3               0  osd.3                     down         0  1.00000
```

## Remove the unused OSD

osd.3 is down, has a weight of 0, and is flagged `autoout,exists,new`, so it holds no data; only its stale registration needs to be removed. That means deleting its CRUSH entry (if it has one), its auth key, and finally the OSD ID itself:

```
bash-4.4$ ceph osd crush remove osd.3
device 'osd.3' does not appear in the crush map
bash-4.4$ ceph auth del osd.3
bash-4.4$ ceph osd rm 3
removed osd.3
```
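
On Luminous and later releases the same three steps (CRUSH removal, auth key deletion, OSD ID removal) can be done with a single command:

```bash
# One-step removal; the confirmation flag is required.
ceph osd purge 3 --yes-i-really-mean-it
```

If this cluster is managed by Rook, any leftover Deployment for the OSD (typically named `rook-ceph-osd-3` in the `rook-ceph` namespace, assuming Rook's default naming) should also be removed so no pod keeps trying to start the deleted OSD.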

```
bash-4.4$ ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME               STATUS  REWEIGHT  PRI-AFF
-1         0.08789  root default
-3         0.02930      host k8s-worker-01
 0    ssd  0.02930          osd.0               up   1.00000  1.00000
-5         0.02930      host k8s-worker-02
 1    ssd  0.02930          osd.1               up   1.00000  1.00000
-7         0.02930      host k8s-worker-03
 2    ssd  0.02930          osd.2               up   1.00000  1.00000
```
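
osd.3 is gone from the CRUSH tree. A final check should now report `3 osds: 3 up, 3 in`, and the OSD alarm in the dashboard should clear on its next refresh; the separate `mon b is low on available space` warning remains until that monitor's disk is freed up.

```bash
# Verify the OSD count and overall health after the removal.
ceph osd stat
ceph status
```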