[client.libvirt]
# must be writable by QEMU and allowed by SELinux or AppArmor
admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
# must be writable by QEMU and allowed by SELinux or AppArmor
log file = /var/log/ceph/qemu-guest-$pid.log

# Please do not change this file directly since it is managed by Ansible and will be overwritten
[global]
# let's force the admin socket the way it was so we can properly check for existing instances
# also the line $cluster-$name.$pid.$cctid.asok is only needed when running multiple instances
# of the same daemon, something ceph-ansible cannot do at the time of writing
admin socket = "$run_dir/$cluster-$name.asok"
cluster network = 172.17.4.0/24
fsid = 8c1bd066-104e-11e9-aa09-5254000e346c
log file = /dev/null
mon cluster log file = /dev/null
mon host = 172.17.3.11,172.17.3.24,172.17.3.35
osd_pool_default_pg_num = 32
osd_pool_default_pgp_num = 32
osd_pool_default_size = 3
public network = 172.17.3.0/24
rgw_keystone_accepted_roles = Member, admin
rgw_keystone_admin_domain = default
# NOTE(review): plaintext Keystone admin credential stored here — consider moving to a secret store
rgw_keystone_admin_password = 3YRP8XRYmMDbNh94rJ62yTkDF
rgw_keystone_admin_project = service
rgw_keystone_admin_user = swift
rgw_keystone_api_version = 3
rgw_keystone_implicit_tenants = true
rgw_keystone_revocation_interval = 0
rgw_keystone_url = http://172.17.1.25:5000
rgw_s3_auth_use_keystone = true

[osd]
osd memory target = 4294967296