Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- [root@blade01 ~]# lxc-attach --name infra1_keystone_container-0f212e1d
- [root@infra1-keystone-container-0f212e1d ~]# ip a
- 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
- inet 127.0.0.1/8 scope host lo
- valid_lft forever preferred_lft forever
- inet6 ::1/128 scope host
- valid_lft forever preferred_lft forever
- 451: eth0@if452: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP qlen 1000
- link/ether 00:16:3e:1f:b1:76 brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet6 fe80::216:3eff:fe1f:b176/64 scope link
- valid_lft forever preferred_lft forever
- 453: eth1@if454: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP qlen 1000
- link/ether 00:16:3e:61:48:e4 brd ff:ff:ff:ff:ff:ff link-netnsid 0
- inet 10.0.0.206/22 brd 10.0.3.255 scope global eth1
- valid_lft forever preferred_lft forever
- inet6 fe80::216:3eff:fe61:48e4/64 scope link
- valid_lft forever preferred_lft forever
- [root@infra1-keystone-container-0f212e1d ~]# ip route list
- 10.0.0.0/22 dev eth1 proto kernel scope link src 10.0.0.206
- 169.254.0.0/16 dev eth1 scope link metric 1453
- [root@infra1-keystone-container-0f212e1d ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
- # Ansible managed
- ### start generated network for [ eth1 ] ###
- DEVICE=eth1
- BOOTPROTO=none
- ONBOOT=yes
- NM_CONTROLLED=no
- TYPE=Ethernet
- IPADDR=10.0.0.206
- NETMASK=255.255.252.0
- MTU=1500
- DELAY=0
- ### end generated network for [ eth1 ] ###
- cat /etc/openstack_deploy/openstack_user_config.yml
- ---
- cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
- used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
- global_overrides:
- internal_lb_vip_address: 172.29.236.1
- #
- # The below domain name must resolve to an IP address
- # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
- # If using different protocols (https/http) for the public/internal
- # endpoints the two addresses must be different.
- #
- external_lb_vip_address: openstackbc.office.dc2
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
- ###
- ### Infrastructure
- ###
- # galera, memcache, rabbitmq, utility
- shared-infra_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # repository (apt cache, python packages, etc)
- repo-infra_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # load balancer
- # Ideally the load balancer should not use the Infrastructure hosts.
- # Dedicated hardware is best for improved performance and security.
- haproxy_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # rsyslog server
- log_hosts:
- infra1:
- ip: 172.29.236.1
- ###
- ### OpenStack
- ###
- # keystone
- identity_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # cinder api services
- storage-infra_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # glance
- # The settings here are repeated for each infra host.
- # They could instead be applied as global settings in
- # user_variables, but are left here to illustrate that
- # each container could have different storage targets.
- image_hosts:
- infra1:
- ip: 172.29.236.1
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.15"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- infra2:
- ip: 172.29.236.2
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.15"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- # NOTE: removed a stray duplicate 'container_vars' block here — it repeated the
- # infra2 glance settings immediately above, and a duplicate 'container_vars'
- # key under the same host entry is invalid YAML (likely a copy-paste artifact).
- # nova api, conductor, etc services
- compute-infra_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # heat
- orchestration_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # horizon
- dashboard_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # neutron server, agents (L3, etc)
- network_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # ceilometer (telemetry API)
- metering-infra_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # aodh (telemetry alarm service)
- metering-alarm_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- # gnocchi (telemetry metrics storage)
- metrics_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- ###
- ### Additional Services
- ###
- # The infra nodes that will be running the magnum services
- magnum-infra_hosts:
- infra1:
- ip: 172.29.236.1
- infra2:
- ip: 172.29.236.2
- swift_hosts:
- compute3:
- ip: 172.29.236.4
- container_vars:
- swift_vars:
- zone: 0
- compute4:
- ip: 172.29.236.5
- container_vars:
- swift_vars:
- zone: 1
- compute5:
- ip: 172.29.236.6
- container_vars:
- swift_vars:
- zone: 2
- # nova hypervisors
- compute_hosts:
- infra2:
- ip: 172.29.236.2
- compute2:
- ip: 172.29.236.3
- compute3:
- ip: 172.29.236.4
- compute4:
- ip: 172.29.236.5
- compute5:
- ip: 172.29.236.6
- compute6:
- ip: 172.29.236.7
- compute7:
- ip: 172.29.236.8
- compute8:
- ip: 172.29.236.9
- compute9:
- ip: 172.29.236.10
- compute10:
- ip: 172.29.236.11
- compute11:
- ip: 172.29.236.12
- compute12:
- ip: 172.29.236.13
- compute13:
- ip: 172.29.236.14
- compute14:
- ip: 172.29.236.15
- compute15:
- ip: 172.29.236.16
- # ceilometer compute agent (telemetry)
- metering-compute_hosts:
- infra2:
- ip: 172.29.236.2
- compute2:
- ip: 172.29.236.3
- compute3:
- ip: 172.29.236.4
- compute4:
- ip: 172.29.236.5
- compute5:
- ip: 172.29.236.6
- compute6:
- ip: 172.29.236.7
- compute7:
- ip: 172.29.236.8
- compute8:
- ip: 172.29.236.9
- compute9:
- ip: 172.29.236.10
- compute10:
- ip: 172.29.236.11
- compute11:
- ip: 172.29.236.12
- compute12:
- ip: 172.29.236.13
- compute13:
- ip: 172.29.236.14
- compute14:
- ip: 172.29.236.15
- compute15:
- ip: 172.29.236.16
- # cinder volume hosts (NFS-backed)
- # The settings here are repeated for each infra host.
- # They could instead be applied as global settings in
- # user_variables, but are left here to illustrate that
- # each container could have different storage targets.
- storage_hosts:
- infra1:
- ip: 172.29.236.1
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.15"
- share: "/vol/cinder"
- infra2:
- ip: 172.29.236.2
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.15"
- share: "/vol/cinder"
- # NOTE: removed a stray duplicate 'container_vars' block here — it repeated the
- # infra2 cinder_backends settings immediately above, and a duplicate
- # 'container_vars' key under the same host entry is invalid YAML (likely a
- # copy-paste artifact).
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement