Difference between Paste ID: BVTVD8zQ (old) and 2hqL3032 (new)
Old paste (BVTVD8zQ): ceph.conf

[global]
fsid = 987b3c40-fac6-4cc0-812a-14f950f4dfd7
mon_initial_members = blade7
mon_host = 192.168.51.92
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd pool default size = 2

New paste (2hqL3032): ceph-deploy osd activate output

ceph-deploy osd activate blade7:/var/local/osd0 blade8:/var/lib/ceph/osd/ceph-1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cephcool06/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.13): /usr/bin/ceph-deploy osd activate blade7:/var/local/osd0 blade8:/var/lib/ceph/osd/ceph-1
[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks blade7:/var/local/osd0: blade8:/var/lib/ceph/osd/ceph-1:
[blade7][DEBUG ] connected to host: blade7
[blade7][DEBUG ] detect platform information from remote host
[blade7][DEBUG ] detect machine type
[ceph_deploy.osd][INFO  ] Distro info: debian 7.0 wheezy
[ceph_deploy.osd][DEBUG ] activating host blade7 disk /var/local/osd0
[ceph_deploy.osd][DEBUG ] will use init type: sysvinit
[blade7][INFO  ] Running command: sudo ceph-disk -v activate --mark-init sysvinit --mount /var/local/osd0
[blade7][DEBUG ] === osd.0 ===
[blade7][DEBUG ] Starting Ceph osd.0 on blade7...already running
[blade7][WARNIN] DEBUG:ceph-disk:Cluster uuid is 987b3c40-fac6-4cc0-812a-14f950f4dfd7
[blade7][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[blade7][WARNIN] DEBUG:ceph-disk:Cluster name is ceph
[blade7][WARNIN] DEBUG:ceph-disk:OSD uuid is ed3bbc9f-dba4-4c67-aa1c-e756722e946c
[blade7][WARNIN] DEBUG:ceph-disk:OSD id is 0
[blade7][WARNIN] DEBUG:ceph-disk:Marking with init system sysvinit
[blade7][WARNIN] DEBUG:ceph-disk:ceph osd.0 data dir is ready at /var/local/osd0
[blade7][WARNIN] DEBUG:ceph-disk:Starting ceph osd.0...
[blade7][WARNIN] INFO:ceph-disk:Running command: /usr/sbin/service ceph start osd.0
[blade7][INFO  ] checking OSD status...
[blade7][INFO  ] Running command: sudo ceph --cluster=ceph osd stat --format=json
[blade7][WARNIN] there is 1 OSD down
[blade7][WARNIN] there is 1 OSD out
[blade8][DEBUG ] connected to host: blade8
[blade8][DEBUG ] detect platform information from remote host
[blade8][DEBUG ] detect machine type
[ceph_deploy.osd][INFO  ] Distro info: debian 7.0 wheezy
[ceph_deploy.osd][DEBUG ] activating host blade8 disk /var/lib/ceph/osd/ceph-1
[ceph_deploy.osd][DEBUG ] will use init type: sysvinit
[blade8][INFO  ] Running command: sudo ceph-disk -v activate --mark-init sysvinit --mount /var/lib/ceph/osd/ceph-1
[blade8][WARNIN] DEBUG:ceph-disk:Cluster uuid is 62c60864-f3b6-41b4-99ab-7c4d0bf1acad
[blade8][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[blade8][WARNIN] ceph-disk: Error: No cluster conf found in /etc/ceph with fsid 62c60864-f3b6-41b4-99ab-7c4d0bf1acad
[blade8][ERROR ] RuntimeError: command returned non-zero exit status: 1
[ceph_deploy][ERROR ] RuntimeError: Failed to execute command: ceph-disk -v activate --mark-init sysvinit --mount /var/lib/ceph/osd/ceph-1
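
Why blade8 fails: ceph-disk activate reads the ceph_fsid file that ceph-disk prepare wrote into the OSD data directory and refuses to start the OSD unless a conf under /etc/ceph carries the same fsid. On blade8 the data dir records 62c60864-f3b6-41b4-99ab-7c4d0bf1acad, while the cluster conf (old paste) says 987b3c40-fac6-4cc0-812a-14f950f4dfd7, so the directory was most likely prepared against an earlier, since-recreated cluster. A minimal recovery sketch, assuming the old paste's ceph.conf is the live cluster and /var/lib/ceph/osd/ceph-1 holds nothing worth keeping (the rm below is destructive):

    # On blade8: confirm the mismatch between data dir and conf.
    cat /var/lib/ceph/osd/ceph-1/ceph_fsid   # 62c60864-... (stale cluster)
    grep fsid /etc/ceph/ceph.conf            # 987b3c40-... (current cluster)

    # Wipe the stale data dir (destroys its contents; assumed expendable).
    sudo rm -rf /var/lib/ceph/osd/ceph-1/*

    # From the admin node (as cephcool06): push the current conf, then redo the OSD.
    ceph-deploy --overwrite-conf config push blade8
    ceph-deploy osd prepare blade8:/var/lib/ceph/osd/ceph-1
    ceph-deploy osd activate blade8:/var/lib/ceph/osd/ceph-1

The ceph_fsid check exists precisely to stop an OSD prepared for one cluster from being started inside another, which is also why blade7 (whose data dir matches 987b3c40-...) activates cleanly while blade8 does not.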