Diff of paste JrKRkkAy against paste i9vvn7X1; lines prefixed with '-' are those the diff view flags as changed/removed.
- root@paris3:~# /etc/init.d/ceph -a stop
=== mon.0 ===
- Stopping Ceph mon.0 on ceph3...kill 45877...done
=== mon.1 ===
- Stopping Ceph mon.1 on ceph4...kill 32153...done
=== mon.2 ===
- Stopping Ceph mon.2 on ceph5...kill 39694...done
=== mds.0 ===
- Stopping Ceph mds.0 on ceph3...kill 46547...done
starting mds.0 at :/0
- Stopping Ceph mds.1 on ceph4...kill 32614...done
Starting Ceph mds.1 on ceph4...
- Stopping Ceph mds.2 on ceph5...kill 40157...done
=== mds.2 ===
- Stopping Ceph osd.0 on ceph3...kill 47584...kill 47584...done
starting mds.2 at :/0
- Stopping Ceph osd.1 on ceph4...kill 33382...kill 33382...done
Mounting xfs on ceph3:/srv/ceph/osd0
- Stopping Ceph osd.2 on ceph5...kill 40917...kill 40917...done
Starting Ceph osd.0 on ceph3...
- Stopping Ceph osd.3 on ceph3...kill 48732...kill 48732...done
=== osd.1 ===
- Stopping Ceph osd.4 on ceph4...kill 34203...kill 34203...done
df: `/srv/ceph/osd1/.': No such file or directory
- Stopping Ceph osd.5 on ceph5...kill 41731...kill 41731...done
create-or-move updating item id 1 name 'osd.1' weight 1 at location {host=ceph3,root=default} to crush map
- Stopping Ceph osd.6 on ceph3...kill 49944...kill 49944...done
starting osd.1 at :/0 osd_data /srv/ceph/osd1 /srv/ceph/journals/osd1/journal
=== osd.2 ===
Mounting xfs on ceph5:/srv/ceph/osd2
df: `/srv/ceph/osd2/.': No such file or directory
df: no file systems processed
create-or-move updating item id 2 name 'osd.2' weight 1 at location {host=ceph3,root=default} to crush map
Starting Ceph osd.2 on ceph5...
starting osd.2 at :/0 osd_data /srv/ceph/osd2 /srv/ceph/journals/osd2/journal
=== osd.3 ===
Mounting xfs on ceph3:/srv/ceph/osd3
create-or-move updated item id 3 name 'osd.3' weight 0.07 at location {host=ceph3,root=default} to crush map
Starting Ceph osd.3 on ceph3...
starting osd.3 at :/0 osd_data /srv/ceph/osd3 /srv/ceph/osd3/journal
=== osd.4 ===
Mounting xfs on ceph4:/srv/ceph/osd4
df: `/srv/ceph/osd4/.': No such file or directory
df: no file systems processed
create-or-move updating item id 4 name 'osd.4' weight 1 at location {host=ceph3,root=default} to crush map
Starting Ceph osd.4 on ceph4...
starting osd.4 at :/0 osd_data /srv/ceph/osd4 /srv/ceph/osd4/journal
=== osd.5 ===
Mounting xfs on ceph5:/srv/ceph/osd5
df: `/srv/ceph/osd5/.': No such file or directory
df: no file systems processed
create-or-move updating item id 5 name 'osd.5' weight 1 at location {host=ceph3,root=default} to crush map
Starting Ceph osd.5 on ceph5...
starting osd.5 at :/0 osd_data /srv/ceph/osd5 /srv/ceph/osd5/journal
=== osd.6 ===
Mounting xfs on ceph3:/srv/ceph/osd6
create-or-move updated item id 6 name 'osd.6' weight 2.73 at location {host=ceph3,root=default} to crush map
Starting Ceph osd.6 on ceph3...
starting osd.6 at :/0 osd_data /srv/ceph/osd6 /srv/ceph/journals/osd6/journal

root@paris3:~# ceph -s
   health HEALTH_WARN 41 pgs peering; 727 pgs stale; 41 pgs stuck inactive; 640 pgs stuck stale; 640 pgs stuck unclean; recovery  recovering 16 o/s, 67025KB/s; mds cluster is degraded
   monmap e1: 3 mons at {0=10.123.123.3:6789/0,1=10.123.123.4:6789/0,2=10.123.123.5:6789/0}, election epoch 56, quorum 0,1,2 0,1,2
   osdmap e389: 7 osds: 7 up, 7 in
    pgmap v37440: 1280 pgs: 640 stale+active+clean, 308 active+remapped, 87 stale+active+remapped, 204 active+replay+remapped, 41 remapped+peering; 39222 MB data, 122 GB used, 11259 GB / 11382 GB avail;  recovering 16 o/s, 67025KB/s
   mdsmap e107: 1/1/1 up {0=1=up:replay}, 2 up:standby

root@paris3:~# ceph osd tree
# id	weight	type name	up/down	reweight
-6	0.06999	root ssd
-7	0.06999		rack ssd_rack_01
3	0.06999			osd.3	up	1
-5	0	host ceph5
-4	0	host ceph4
-2	12.21	host ceph3
0	3		osd.0	up	1
6	3		osd.6	up	1
3	0.06999		osd.3	up	1
1	3		osd.1	up	1
2	3		osd.2	up	1
4	0.06999		osd.4	up	1
5	0.06999		osd.5	up	1
-1	6	root default
-3	6		rack hdd_rack_01
0	3			osd.0	up	1
6	3			osd.6	up	1