# NOTE(review): removed Pastebin page chrome ("Advertisement" / "Sign Up" banner) — not part of the configuration file.
---
# OpenStack-Ansible deployment configuration
# (originally /etc/openstack_deploy/openstack_user_config.yml; the captured
# shell prompt line from the paste has been removed).

# IP ranges from which container/host addresses are assigned per network queue.
cidr_networks:
  management: "172.25.240.0/22"
  tunnel: "172.25.224.0/22"
  # NOTE(review): tunnel and storage share the same CIDR — confirm intentional.
  storage: "172.25.224.0/22"

# Addresses inside the CIDRs above that must never be auto-assigned
# (VIPs and other statically allocated hosts).
used_ips:
  - "172.25.224.11"
  - "172.25.224.102"
  - "172.25.224.103"
global_overrides:
  internal_lb_vip_address: "172.25.224.103"
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  external_lb_vip_address: "172.25.224.102"
  management_bridge: "br-mgmt"
  # NOTE(review): VIPs above fall in 172.25.224.0/22, not in the declared
  # management CIDR 172.25.240.0/22 — verify against the network plan.
  provider_networks:
    # Management network: container/host addressing for all services.
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "ens4"
        ip_from_q: "management"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_management_address: true
    # VXLAN overlay for tenant networks.
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "ens4"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    # Flat provider network.
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "ens4"
        host_bind_override: "ens4"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    # VLAN provider networks (tagged ranges).
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "ens4"
        type: "vlan"
        range: "101:200,301:400"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    # Storage network for glance/cinder/nova data traffic.
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "ens4"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30

# zookeeper
coordination_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  hap6:
    ip: 172.25.225.35
  hap5:
    ip: 172.25.225.34
  hap4:
    ip: 172.25.225.33
###
### OpenStack
###

# keystone
identity_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30

# cinder api services
storage-infra_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30
# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  inf5:
    ip: 172.25.225.31
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "10.98.11.58:/glance_images"
          where: /var/lib/glance/images
          type: "nfs"
          options: "_netdev,auto"
  inf6:
    ip: 172.25.225.32
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "10.98.11.58:/glance_images"
          where: /var/lib/glance/images
          type: "nfs"
          options: "_netdev,auto"
  inf4:
    ip: 172.25.225.30
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "10.98.11.58:/glance_images"
          where: /var/lib/glance/images
          type: "nfs"
          options: "_netdev,auto"
# placement
placement-infra_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30

# nova api, conductor, etc services
compute-infra_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30

# heat
orchestration_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30

# horizon
dashboard_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30

# neutron api
network-infra_hosts:
  inf5:
    ip: 172.25.225.31
  inf6:
    ip: 172.25.225.32
  inf4:
    ip: 172.25.225.30
# neutron agents (L3, DHCP, etc)
network-agent_hosts:
  net1:
    ip: 172.25.39.35
  net2:
    ip: 172.25.39.36

# nova hypervisors
compute_hosts:
  cmp3:
    ip: 172.25.225.40
    host_vars:
      nova_spice_html5proxy_base_url: "https://ost-b-con-dev.d.storagecraft.com/spice_auto.html"
  cmp4:
    ip: 172.25.225.41
    host_vars:
      nova_spice_html5proxy_base_url: "https://ost-b-con-dev.d.storagecraft.com/spice_auto.html"
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  inf5:
    ip: 172.25.225.31
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=1048576,wsize=1048576,timeo=1200,actimeo=120,nolock,noatime"
          nfs_shares_config: /etc/cinder/nfs_shares
          nfs_qcow2_volumes: true
          nfs_snapshot_support: true
          max_over_subscription_ratio: 10000
          shares:
            - ip: "nfs-qcow-b.d.storagecraft.com"
              share: "/qcow_nfs"
  inf6:
    ip: 172.25.225.32
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=1048576,wsize=1048576,timeo=1200,actimeo=120,nolock,noatime"
          nfs_shares_config: /etc/cinder/nfs_shares
          nfs_qcow2_volumes: true
          nfs_snapshot_support: true
          max_over_subscription_ratio: 10000
          shares:
            - ip: "nfs-qcow-b.d.storagecraft.com"
              share: "/qcow_nfs"
  inf4:
    ip: 172.25.225.30
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=1048576,wsize=1048576,timeo=1200,actimeo=120,nolock,noatime"
          nfs_shares_config: /etc/cinder/nfs_shares
          nfs_qcow2_volumes: true
          nfs_snapshot_support: true
          max_over_subscription_ratio: 10000
          shares:
            - ip: "nfs-qcow-b.d.storagecraft.com"
              share: "/qcow_nfs"

# No on-premises compute hosts in this environment (explicit empty mapping).
onprem_compute_hosts: {}
# NOTE(review): removed trailing Pastebin page chrome ("Advertisement" / "Add Comment") — not part of the configuration file.