[root@blade01 ~]# lxc-attach --name infra1_keystone_container-0f212e1d
[root@infra1-keystone-container-0f212e1d ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
451: eth0@if452: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP qlen 1000
    link/ether 00:16:3e:1f:b1:76 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::216:3eff:fe1f:b176/64 scope link
       valid_lft forever preferred_lft forever
453: eth1@if454: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP qlen 1000
    link/ether 00:16:3e:61:48:e4 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 10.0.0.206/22 brd 10.0.3.255 scope global eth1
       valid_lft forever preferred_lft forever
    inet6 fe80::216:3eff:fe61:48e4/64 scope link
       valid_lft forever preferred_lft forever
[root@infra1-keystone-container-0f212e1d ~]# ip route list
10.0.0.0/22 dev eth1 proto kernel scope link src 10.0.0.206
169.254.0.0/16 dev eth1 scope link metric 1453
[root@infra1-keystone-container-0f212e1d ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
# Ansible managed

### start generated network for [ eth1 ] ###
DEVICE=eth1
BOOTPROTO=none
ONBOOT=yes
NM_CONTROLLED=no
TYPE=Ethernet
IPADDR=10.0.0.206
NETMASK=255.255.252.0
MTU=1500
DELAY=0
### end generated network for [ eth1 ] ###

cat /etc/openstack_deploy/openstack_user_config.yml
---
cidr_networks:
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  - "172.29.248.1,172.29.248.50"

global_overrides:
  internal_lb_vip_address: 172.29.236.1
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  external_lb_vip_address: openstackbc.office.dc2
  tunnel_bridge: "br-vxlan"
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eth12"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "1:1"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute

###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# repository (apt cache, python packages, etc)
repo-infra_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# rsyslog server
log_hosts:
  infra1:
    ip: 172.29.236.1

###
### OpenStack
###

# keystone
identity_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# cinder api services
storage-infra_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  infra1:
    ip: 172.29.236.1
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  infra2:
    ip: 172.29.236.2
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "172.29.244.15"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"

# nova api, conductor, etc services
compute-infra_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# heat
orchestration_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# horizon
dashboard_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# neutron server, agents (L3, etc)
network_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# ceilometer (telemetry API)
metering-infra_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# aodh (telemetry alarm service)
metering-alarm_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2
# gnocchi (telemetry metrics storage)
metrics_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2

###
### Additional Services
###
# The infra nodes that will be running the magnum services
magnum-infra_hosts:
  infra1:
    ip: 172.29.236.1
  infra2:
    ip: 172.29.236.2

swift_hosts:
  compute3:
    ip: 172.29.236.4
    container_vars:
      swift_vars:
        zone: 0
  compute4:
    ip: 172.29.236.5
    container_vars:
      swift_vars:
        zone: 1
  compute5:
    ip: 172.29.236.6
    container_vars:
      swift_vars:
        zone: 2

# nova hypervisors
compute_hosts:
  infra2:
    ip: 172.29.236.2
  compute2:
    ip: 172.29.236.3
  compute3:
    ip: 172.29.236.4
  compute4:
    ip: 172.29.236.5
  compute5:
    ip: 172.29.236.6
  compute6:
    ip: 172.29.236.7
  compute7:
    ip: 172.29.236.8
  compute8:
    ip: 172.29.236.9
  compute9:
    ip: 172.29.236.10
  compute10:
    ip: 172.29.236.11
  compute11:
    ip: 172.29.236.12
  compute12:
    ip: 172.29.236.13
  compute13:
    ip: 172.29.236.14
  compute14:
    ip: 172.29.236.15
  compute15:
    ip: 172.29.236.16

# ceilometer compute agent (telemetry)
metering-compute_hosts:
  infra2:
    ip: 172.29.236.2
  compute2:
    ip: 172.29.236.3
  compute3:
    ip: 172.29.236.4
  compute4:
    ip: 172.29.236.5
  compute5:
    ip: 172.29.236.6
  compute6:
    ip: 172.29.236.7
  compute7:
    ip: 172.29.236.8
  compute8:
    ip: 172.29.236.9
  compute9:
    ip: 172.29.236.10
  compute10:
    ip: 172.29.236.11
  compute11:
    ip: 172.29.236.12
  compute12:
    ip: 172.29.236.13
  compute13:
    ip: 172.29.236.14
  compute14:
    ip: 172.29.236.15
  compute15:
    ip: 172.29.236.16

# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  infra1:
    ip: 172.29.236.1
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
  infra2:
    ip: 172.29.236.2
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"