Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
---
# /etc/netplan/01-netcfg.yaml
# This file describes the network interfaces available on your system.
# For more information, see netplan(5).
#
# Topology: enp3s0 + enp4s0 are bonded (LACP) into bond0; VLANs 1/10/18
# ride on the bond, and each VLAN is attached to a dedicated bridge so
# LXC containers / VMs can plug into the right network segment.
network:
  version: 2
  renderer: networkd
  ethernets:
    # Physical NICs carry no addresses of their own — they only feed bond0.
    enp3s0:
      dhcp4: false
      dhcp6: false
    enp4s0:
      dhcp4: false
      dhcp6: false
  bonds:
    bond0:
      interfaces:
        - enp3s0
        - enp4s0
      parameters:
        # 802.3ad = LACP; the switch ports must be configured as a LAG.
        mode: 802.3ad
  vlans:
    vlan.1:
      id: 1
      link: bond0
    vlan.10:
      id: 10
      link: bond0
    vlan.18:
      id: 18
      link: bond0
  bridges:
    # Management bridge — the only interface with a gateway and DNS.
    br-mgmt:
      interfaces:
        - vlan.1
      dhcp4: false
      dhcp6: false
      addresses:
        # Quoted: a bare {{ ... }} would be parsed as a YAML flow mapping
        # before the template engine ever sees it.
        - "{{ network_mgmt }}"
      # NOTE(review): gateway4 is deprecated in newer netplan releases;
      # prefer "routes: [{to: default, via: 10.1.0.1}]" when upgrading.
      gateway4: 10.1.0.1
      nameservers:
        addresses:
          - 1.0.0.1
          - 1.1.1.1
          - 8.8.4.4
          - 8.8.8.8
    # Storage bridge — addressed, but no default route.
    br-storage:
      interfaces:
        - vlan.18
      dhcp4: false
      dhcp6: false
      addresses:
        - "{{ network_storage }}"
    # Provider/tenant traffic bridge — L2 only, no host address.
    br-vlan:
      interfaces:
        - vlan.10
      dhcp4: false
      dhcp6: false
---
# /etc/openstack_deploy/openstack_user_config.yml
# OpenStack-Ansible deployment layout: three converged nodes
# (OpenStack-0/1/2) carrying control plane, compute, and Ceph storage.

cidr_networks:
  container: 10.1.0.0/24
  storage: 10.1.1.0/24

# IP ranges (inclusive) excluded from automatic container assignment;
# quoted because a bare "a,b" pair must stay a single string.
used_ips:
  - "10.1.0.1,10.1.0.132"
  - "10.1.0.164,10.1.0.254"

global_overrides:
  internal_lb_vip_address: 10.1.0.100
  management_bridge: br-mgmt
  provider_networks:
    # Management network: containers get their primary address here.
    - network:
        group_binds:
          - all_containers
          - hosts
        type: "raw"
        container_bridge: "br-mgmt"
        container_interface: "eth1"
        container_type: "veth"
        ip_from_q: "container"
        is_container_address: true
    # Storage network: only services that talk to Ceph need a leg here.
    # Jumbo frames (MTU 9000) — switch ports must match.
    - network:
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
        type: "raw"
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        container_mtu: "9000"
        ip_from_q: "storage"
    # Neutron flat provider network for tenant/external traffic.
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth3"
        host_bind_override: "eth3"
        type: "flat"
        net_name: "flat"

# All host groups share the same three nodes via a YAML anchor/alias.
infrastructure_hosts: &infrastructure_hosts
  OpenStack-0:
    ip: 10.1.0.102
  OpenStack-1:
    ip: 10.1.0.103
  OpenStack-2:
    ip: 10.1.0.104

# haproxy
haproxy_hosts: *infrastructure_hosts
# ceph
ceph-mon_hosts: *infrastructure_hosts
ceph-osd_hosts: *infrastructure_hosts
# galera, memcache, rabbitmq, utility
shared-infra_hosts: *infrastructure_hosts
# repository (apt cache, python packages, etc)
repo-infra_hosts: *infrastructure_hosts
# glance, nova, heat, horizon
os-infra_hosts: *infrastructure_hosts
# keystone
identity_hosts: *infrastructure_hosts
# neutron
network_hosts: *infrastructure_hosts
# nova
compute_hosts: *infrastructure_hosts
# storage
storage-infra_hosts: *infrastructure_hosts
storage_hosts: *infrastructure_hosts
---
# /etc/openstack_deploy/user_variables.yml

# Because we have three haproxy nodes, we need one active LB IP,
# and we use keepalived for that.
## Load Balancer Configuration (haproxy/keepalived)
haproxy_keepalived_external_vip_cidr: "198.60.81.0/28"
haproxy_keepalived_internal_vip_cidr: "10.1.0.0/24"
haproxy_keepalived_external_interface: br-vlan
haproxy_keepalived_internal_interface: br-mgmt

## Ceph cluster fsid (must be generated before first run)
## Generate a uuid using: python -c 'import uuid; print(str(uuid.uuid4()))'
generate_fsid: false
fsid: d948cc42-0fa1-4653-bdbd-8ff32bd80e89  # Replace with your generated UUID

## ceph-ansible settings
## See https://github.com/ceph/ceph-ansible/tree/master/group_vars for
## additional configuration options available.
monitor_address_block: "{{ cidr_networks.container }}"
public_network: "{{ cidr_networks.container }}"
cluster_network: "{{ cidr_networks.storage }}"
osd_scenario: collocated
journal_size: 10240  # size in MB

# ceph-ansible automatically creates pools & keys for OpenStack services
openstack_config: true
cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images
nova_libvirt_images_rbd_pool: vms

cinder_backends:
  RBD:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement