Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- root@controller:~# cat /etc/neutron/plugins/ml2/ml2_conf.ini
- [ml2]
- # (ListOpt) List of network type driver entrypoints to be loaded from
- # the neutron.ml2.type_drivers namespace.
- #
- # type_drivers = local,flat,vlan,gre,vxlan
- # Example: type_drivers = flat,vlan,gre,vxlan
- #type_drivers = flat,gre
- type_drivers = vlan
- # (ListOpt) Ordered list of network_types to allocate as tenant
- # networks. The default value 'local' is useful for single-box testing
- # but provides no connectivity between hosts.
- #
- # tenant_network_types = local
- # Example: tenant_network_types = vlan,gre,vxlan
- #tenant_network_types = gre
- tenant_network_types = vlan
- # (ListOpt) Ordered list of networking mechanism driver entrypoints
- # to be loaded from the neutron.ml2.mechanism_drivers namespace.
- mechanism_drivers = openvswitch,sriovnicswitch
- # Example: mechanism_drivers = openvswitch,mlnx
- # Example: mechanism_drivers = arista
- # Example: mechanism_drivers = cisco,logger
- # Example: mechanism_drivers = openvswitch,brocade
- # Example: mechanism_drivers = linuxbridge,brocade
- # (ListOpt) Ordered list of extension driver entrypoints
- # to be loaded from the neutron.ml2.extension_drivers namespace.
- # extension_drivers =
- # Example: extension_drivers = anewextensiondriver
- [ml2_type_flat]
- # (ListOpt) List of physical_network names with which flat networks
- # can be created. Use * to allow flat networks with arbitrary
- # physical_network names.
- #
- #flat_networks = physnet1,physnet2 25-11-2014
- flat_networks = physnet1
- # Example:flat_networks = physnet1,physnet2
- # Example:flat_networks = *
- [ml2_type_vlan]
- # (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
- # specifying physical_network names usable for VLAN provider and
- # tenant networks, as well as ranges of VLAN tags on each
- # physical_network available for allocation as tenant networks.
- #
- #network_vlan_ranges =physnet1:1000:2999,physnet2:500:600 25-11-2014
- #network_vlan_ranges =physnet1:2:100,physnet3:200:300
- network_vlan_ranges = physnet1:2:100
- # Example: network_vlan_ranges = physnet1:1000:2999,physnet2
- [ml2_type_gre]
- # (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
- tunnel_id_ranges = 1:1000
- [ml2_type_vxlan]
- # (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
- # ranges of VXLAN VNI IDs that are available for tenant network allocation.
- #
- # vni_ranges =
- # (StrOpt) Multicast group for the VXLAN interface. When configured, will
- # enable sending all broadcast traffic to this multicast group. When left
- # unconfigured, will disable multicast VXLAN mode.
- #
- # vxlan_group =
- # Example: vxlan_group = 239.1.1.1
- [securitygroup]
- # Controls if neutron security group is enabled or not.
- # It should be false when you use nova security group.
- enable_security_group = True
- # Use ipset to speed-up the iptables security groups. Enabling ipset support
- # requires that ipset is installed on L2 agent node.
- enable_ipset = True
- firewall_driver = neutron.agent.firewall.NoopFirewallDriver
- #firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
- [ovs]
- local_ip = 172.160.100.1
- tunnel_type = gre
- enable_tunneling = True
- bridge_mappings = physnet2:br-ex
- [agent]
- tunnel_types = gre
- root@controller:~# cat /etc/neutron/plugins/ml2/ml2_conf_sriov.ini
- # Defines configuration options for SRIOV NIC Switch MechanismDriver
- # and Agent
- [ml2_sriov]
- # (ListOpt) Comma-separated list of
- # supported Vendor PCI Devices, in format vendor_id:product_id
- #
- # supported_pci_vendor_devs = 15b3:1004, 8086:10c9
- # Example: supported_pci_vendor_devs = 15b3:1004
- #
- # (BoolOpt) Requires running SRIOV neutron agent for port binding
- agent_required = True
- [sriov_nic]
- # (ListOpt) Comma-separated list of <physical_network>:<network_device>
- # tuples mapping physical network names to the agent's node-specific
- # physical network device interfaces of SR-IOV physical function to be used
- # for VLAN networks. All physical networks listed in network_vlan_ranges on
- # the server should have mappings to appropriate interfaces on each agent.
- #
- # physical_device_mappings =
- #physical_device_mappings = physnet2:p2p2
- #
- # (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
- # tuples, mapping network_device to the agent's node-specific list of virtual
- # functions that should not be used for virtual networking.
- # vfs_to_exclude is a semicolon-separated list of virtual
- # functions to exclude from network_device. The network_device in the
- # mapping should appear in the physical_device_mappings list.
- # exclude_devices =
- # Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3
- root@controller:~# cat /etc/neutron/neutron.conf
- [DEFAULT]
- # Print more verbose output (set logging level to INFO instead of default WARNING level).
- verbose = True
- # =========Start Global Config Option for Distributed L3 Router===============
- # Setting the "router_distributed" flag to "True" will default to the creation
- # of distributed tenant routers. The admin can override this flag by specifying
- # the type of the router on the create request (admin-only attribute). Default
- # value is "False" to support legacy mode (centralized) routers.
- #
- # router_distributed = False
- #
- # ===========End Global Config Option for Distributed L3 Router===============
- # Print debugging output (set logging level to DEBUG instead of default WARNING level).
- debug = True
- # Where to store Neutron state files. This directory must be writable by the
- # user executing the agent.
- # state_path = /var/lib/neutron
- # Where to store lock files
- lock_path = $state_path/lock
- # log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
- # log_date_format = %Y-%m-%d %H:%M:%S
- # use_syslog -> syslog
- # log_file and log_dir -> log_dir/log_file
- # (not log_file) and log_dir -> log_dir/{binary_name}.log
- # use_stderr -> stderr
- # (not use_stderr) and (not log_file) -> stdout
- # publish_errors -> notification system
- # use_syslog = False
- # syslog_log_facility = LOG_USER
- # use_stderr = True
- # log_file =
- # log_dir =
- # publish_errors = False
- # Address to bind the API server to
- # bind_host = 0.0.0.0
- # Port to bind the API server to
- # bind_port = 9696
- # Path to the extensions. Note that this can be a colon-separated list of
- # paths. For example:
- # api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
- # The __path__ of neutron.extensions is appended to this, so if your
- # extensions are in there you don't need to specify them here
- # api_extensions_path =
- # (StrOpt) Neutron core plugin entrypoint to be loaded from the
- # neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
- # plugins included in the neutron source distribution. For compatibility with
- # previous versions, the class name of a plugin can be specified instead of its
- # entrypoint name.
- #
- core_plugin = ml2
- # Example: core_plugin = ml2
- # (ListOpt) List of service plugin entrypoints to be loaded from the
- # neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
- # the plugins included in the neutron source distribution. For compatibility
- # with previous versions, the class name of a plugin can be specified instead
- # of its entrypoint name.
- #
- service_plugins = router
- # Example: service_plugins = router,firewall,lbaas,vpnaas,metering
- # Paste configuration file
- # api_paste_config = api-paste.ini
- # The strategy to be used for auth.
- # Supported values are 'keystone'(default), 'noauth'.
- auth_strategy = keystone
- # Base MAC address. The first 3 octets will remain unchanged. If the
- # 4th octet is not 00, it will also be used. The others will be
- # randomly generated.
- # 3 octet
- # base_mac = fa:16:3e:00:00:00
- # 4 octet
- # base_mac = fa:16:3e:4f:00:00
- # DVR Base MAC address. The first 3 octets will remain unchanged. If the
- # 4th octet is not 00, it will also be used. The others will be randomly
- # generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
- # avoid mixing them up with MAC's allocated for tenant ports.
- # A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
- # The default is 3 octet
- # dvr_base_mac = fa:16:3f:00:00:00
- # Maximum amount of retries to generate a unique MAC address
- # mac_generation_retries = 16
- # DHCP Lease duration (in seconds). Use -1 to
- # tell dnsmasq to use infinite lease times.
- # dhcp_lease_duration = 86400
- # Allow sending resource operation notification to DHCP agent
- # dhcp_agent_notification = True
- # Enable or disable bulk create/update/delete operations
- # allow_bulk = True
- # Enable or disable pagination
- # allow_pagination = False
- # Enable or disable sorting
- # allow_sorting = False
- # Enable or disable overlapping IPs for subnets
- # Attention: the following parameter MUST be set to False if Neutron is
- # being used in conjunction with nova security groups
- allow_overlapping_ips = True
- # Ensure that configured gateway is on subnet. For IPv6, validate only if
- # gateway is not a link local address. Deprecated, to be removed during the
- # K release, at which point the check will be mandatory.
- # force_gateway_on_subnet = True
- # Default maximum number of items returned in a single response,
- # value == infinite and value < 0 means no max limit, and value must
- # be greater than 0. If the number of items requested is greater than
- # pagination_max_limit, server will just return pagination_max_limit
- # of number of items.
- # pagination_max_limit = -1
- # Maximum number of DNS nameservers per subnet
- # max_dns_nameservers = 5
- # Maximum number of host routes per subnet
- # max_subnet_host_routes = 20
- # Maximum number of fixed ips per port
- # max_fixed_ips_per_port = 5
- # Maximum number of routes per router
- # max_routes = 30
- # =========== items for agent management extension =============
- # Seconds to regard the agent as down; should be at least twice
- # report_interval, to be sure the agent is down for good
- # agent_down_time = 75
- # =========== end of items for agent management extension =====
- # =========== items for agent scheduler extension =============
- # Driver to use for scheduling network to DHCP agent
- # network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
- # Driver to use for scheduling router to a default L3 agent
- # router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
- # Driver to use for scheduling a loadbalancer pool to an lbaas agent
- # loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
- # Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
- # networks to first DHCP agent which sends get_active_networks message to
- # neutron server
- # network_auto_schedule = True
- # Allow auto scheduling routers to L3 agent. It will schedule non-hosted
- # routers to first L3 agent which sends sync_routers message to neutron server
- # router_auto_schedule = True
- # Allow automatic rescheduling of routers from dead L3 agents with
- # admin_state_up set to True to alive agents.
- # allow_automatic_l3agent_failover = False
- # Number of DHCP agents scheduled to host a network. This enables redundant
- # DHCP agents for configured networks.
- # dhcp_agents_per_network = 1
- # =========== end of items for agent scheduler extension =====
- # =========== items for l3 extension ==============
- # Enable high availability for virtual routers.
- # l3_ha = False
- #
- # Maximum number of l3 agents which a HA router will be scheduled on. If it
- # is set to 0 the router will be scheduled on every agent.
- # max_l3_agents_per_router = 3
- #
- # Minimum number of l3 agents which a HA router will be scheduled on. The
- # default value is 2.
- # min_l3_agents_per_router = 2
- #
- # CIDR of the administrative network if HA mode is enabled
- # l3_ha_net_cidr = 169.254.192.0/18
- # =========== end of items for l3 extension =======
- # =========== WSGI parameters related to the API server ==============
- # Number of separate worker processes to spawn. The default, 0, runs the
- # worker thread in the current process. Greater than 0 launches that number of
- # child processes as workers. The parent process manages them.
- # api_workers = 0
- # Number of separate RPC worker processes to spawn. The default, 0, runs the
- # worker thread in the current process. Greater than 0 launches that number of
- # child processes as RPC workers. The parent process manages them.
- # This feature is experimental until issues are addressed and testing has been
- # enabled for various plugins for compatibility.
- # rpc_workers = 0
- # Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
- # starting API server. Not supported on OS X.
- # tcp_keepidle = 600
- # Number of seconds to keep retrying to listen
- # retry_until_window = 30
- # Number of backlog requests to configure the socket with.
- # backlog = 4096
- # Max header line to accommodate large tokens
- # max_header_line = 16384
- # Enable SSL on the API server
- # use_ssl = False
- # Certificate file to use when starting API server securely
- # ssl_cert_file = /path/to/certfile
- # Private key file to use when starting API server securely
- # ssl_key_file = /path/to/keyfile
- # CA certificate file to use when starting API server securely to
- # verify connecting clients. This is an optional parameter only required if
- # API clients need to authenticate to the API server using SSL certificates
- # signed by a trusted CA
- # ssl_ca_file = /path/to/cafile
- # ======== end of WSGI parameters related to the API server ==========
- # ======== neutron nova interactions ==========
- # Send notification to nova when port status is active.
- notify_nova_on_port_status_changes = True
- # Send notifications to nova when port data (fixed_ips/floatingips) change
- # so nova can update its cache.
- notify_nova_on_port_data_changes = True
- # URL for connection to nova (Only supports one nova region currently).
- nova_url = http://controller:8774/v2
- # Name of nova region to use. Useful if keystone manages more than one region
- nova_region_name = regionOne
- # Username for connection to nova in admin context
- nova_admin_username = admin
- # The uuid of the admin nova tenant
- nova_admin_tenant_id = 32bb085e0a064307b0e590cb31d36ed2
- # Password for connection to nova in admin context.
- nova_admin_password = password
- # Authorization URL for connection to nova in admin context.
- nova_admin_auth_url = http://controller:35357/v2.0
- # CA file for novaclient to verify server certificates
- # nova_ca_certificates_file =
- # Boolean to control ignoring SSL errors on the nova url
- # nova_api_insecure = False
- # Number of seconds between sending events to nova if there are any events to send
- # send_events_interval = 2
- # ======== end of neutron nova interactions ==========
- #
- # Options defined in oslo.messaging
- #
- # Use durable queues in amqp. (boolean value)
- # Deprecated group/name - [DEFAULT]/rabbit_durable_queues
- #amqp_durable_queues=false
- # Auto-delete queues in amqp. (boolean value)
- #amqp_auto_delete=false
- # Size of RPC connection pool. (integer value)
- #rpc_conn_pool_size=30
- # Qpid broker hostname. (string value)
- #qpid_hostname=localhost
- # Qpid broker port. (integer value)
- #qpid_port=5672
- # Qpid HA cluster host:port pairs. (list value)
- #qpid_hosts=$qpid_hostname:$qpid_port
- # Username for Qpid connection. (string value)
- #qpid_username=
- # Password for Qpid connection. (string value)
- #qpid_password=
- # Space separated list of SASL mechanisms to use for auth.
- # (string value)
- #qpid_sasl_mechanisms=
- # Seconds between connection keepalive heartbeats. (integer
- # value)
- #qpid_heartbeat=60
- # Transport to use, either 'tcp' or 'ssl'. (string value)
- #qpid_protocol=tcp
- # Whether to disable the Nagle algorithm. (boolean value)
- #qpid_tcp_nodelay=true
- # The qpid topology version to use. Version 1 is what was
- # originally used by impl_qpid. Version 2 includes some
- # backwards-incompatible changes that allow broker federation
- # to work. Users should update to version 2 when they are
- # able to take everything down, as it requires a clean break.
- # (integer value)
- #qpid_topology_version=1
- # SSL version to use (valid only if SSL enabled). valid values
- # are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
- # distributions. (string value)
- #kombu_ssl_version=
- # SSL key file (valid only if SSL enabled). (string value)
- #kombu_ssl_keyfile=
- # SSL cert file (valid only if SSL enabled). (string value)
- #kombu_ssl_certfile=
- # SSL certification authority file (valid only if SSL
- # enabled). (string value)
- #kombu_ssl_ca_certs=
- # How long to wait before reconnecting in response to an AMQP
- # consumer cancel notification. (floating point value)
- #kombu_reconnect_delay=1.0
- # The RabbitMQ broker address where a single node is used.
- # (string value)
- rabbit_host=controller
- # The RabbitMQ broker port where a single node is used.
- # (integer value)
- #rabbit_port=5672
- # RabbitMQ HA cluster host:port pairs. (list value)
- #rabbit_hosts=$rabbit_host:$rabbit_port
- # Connect over SSL for RabbitMQ. (boolean value)
- #rabbit_use_ssl=false
- # The RabbitMQ userid. (string value)
- #rabbit_userid=guest
- # The RabbitMQ password. (string value)
- rabbit_password=password
- # the RabbitMQ login method (string value)
- #rabbit_login_method=AMQPLAIN
- # The RabbitMQ virtual host. (string value)
- #rabbit_virtual_host=/
- # How frequently to retry connecting with RabbitMQ. (integer
- # value)
- #rabbit_retry_interval=1
- # How long to backoff for between retries when connecting to
- # RabbitMQ. (integer value)
- #rabbit_retry_backoff=2
- # Maximum number of RabbitMQ connection retries. Default is 0
- # (infinite retry count). (integer value)
- #rabbit_max_retries=0
- # Use HA queues in RabbitMQ (x-ha-policy: all). If you change
- # this option, you must wipe the RabbitMQ database. (boolean
- # value)
- #rabbit_ha_queues=false
- # If passed, use a fake RabbitMQ provider. (boolean value)
- #fake_rabbit=false
- # ZeroMQ bind address. Should be a wildcard (*), an ethernet
- # interface, or IP. The "host" option should point or resolve
- # to this address. (string value)
- #rpc_zmq_bind_address=*
- # MatchMaker driver. (string value)
- #rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
- # ZeroMQ receiver listening port. (integer value)
- #rpc_zmq_port=9501
- # Number of ZeroMQ contexts, defaults to 1. (integer value)
- #rpc_zmq_contexts=1
- # Maximum number of ingress messages to locally buffer per
- # topic. Default is unlimited. (integer value)
- #rpc_zmq_topic_backlog=<None>
- # Directory for holding IPC sockets. (string value)
- #rpc_zmq_ipc_dir=/var/run/openstack
- # Name of this node. Must be a valid hostname, FQDN, or IP
- # address. Must match "host" option, if running Nova. (string
- # value)
- #rpc_zmq_host=oslo
- # Seconds to wait before a cast expires (TTL). Only supported
- # by impl_zmq. (integer value)
- #rpc_cast_timeout=30
- # Heartbeat frequency. (integer value)
- #matchmaker_heartbeat_freq=300
- # Heartbeat time-to-live. (integer value)
- #matchmaker_heartbeat_ttl=600
- # Size of RPC greenthread pool. (integer value)
- #rpc_thread_pool_size=64
- # Driver or drivers to handle sending notifications. (multi
- # valued)
- #notification_driver=
- # AMQP topic used for OpenStack notifications. (list value)
- # Deprecated group/name - [rpc_notifier2]/topics
- #notification_topics=notifications
- # Seconds to wait for a response from a call. (integer value)
- #rpc_response_timeout=60
- # A URL representing the messaging driver to use and its full
- # configuration. If not set, we fall back to the rpc_backend
- # option and driver specific configuration. (string value)
- #transport_url=<None>
- # The messaging driver to use, defaults to rabbit. Other
- # drivers include qpid and zmq. (string value)
- rpc_backend=rabbit
- # The default exchange under which topics are scoped. May be
- # overridden by an exchange name specified in the
- # transport_url option. (string value)
- #control_exchange=openstack
- [matchmaker_redis]
- #
- # Options defined in oslo.messaging
- #
- # Host to locate redis. (string value)
- #host=127.0.0.1
- # Use this port to connect to redis host. (integer value)
- #port=6379
- # Password for Redis server (optional). (string value)
- #password=<None>
- [matchmaker_ring]
- #
- # Options defined in oslo.messaging
- #
- # Matchmaker ring file (JSON). (string value)
- # Deprecated group/name - [DEFAULT]/matchmaker_ringfile
- #ringfile=/etc/oslo/matchmaker_ring.json
- [quotas]
- # Default driver to use for quota checks
- # quota_driver = neutron.db.quota_db.DbQuotaDriver
- # Resource name(s) that are supported in quota features
- # quota_items = network,subnet,port
- # Default number of resource allowed per tenant. A negative value means
- # unlimited.
- # default_quota = -1
- # Number of networks allowed per tenant. A negative value means unlimited.
- # quota_network = 10
- # Number of subnets allowed per tenant. A negative value means unlimited.
- # quota_subnet = 10
- # Number of ports allowed per tenant. A negative value means unlimited.
- # quota_port = 50
- # Number of security groups allowed per tenant. A negative value means
- # unlimited.
- # quota_security_group = 10
- # Number of security group rules allowed per tenant. A negative value means
- # unlimited.
- # quota_security_group_rule = 100
- # Number of vips allowed per tenant. A negative value means unlimited.
- # quota_vip = 10
- # Number of pools allowed per tenant. A negative value means unlimited.
- # quota_pool = 10
- # Number of pool members allowed per tenant. A negative value means unlimited.
- # The default is unlimited because a member is not a real resource consumer
- # on OpenStack. However, on back-end, a member is a resource consumer
- # and that is the reason why quota is possible.
- # quota_member = -1
- # Number of health monitors allowed per tenant. A negative value means
- # unlimited.
- # The default is unlimited because a health monitor is not a real resource
- # consumer on OpenStack. However, on back-end, a member is a resource consumer
- # and that is the reason why quota is possible.
- # quota_health_monitor = -1
- # Number of routers allowed per tenant. A negative value means unlimited.
- # quota_router = 10
- # Number of floating IPs allowed per tenant. A negative value means unlimited.
- # quota_floatingip = 50
- # Number of firewalls allowed per tenant. A negative value means unlimited.
- # quota_firewall = 1
- # Number of firewall policies allowed per tenant. A negative value means
- # unlimited.
- # quota_firewall_policy = 1
- # Number of firewall rules allowed per tenant. A negative value means
- # unlimited.
- # quota_firewall_rule = 100
- [agent]
- # Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
- # root filter facility.
- # Change to "sudo" to skip the filtering and just run the command directly
- root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
- # =========== items for agent management extension =============
- # seconds between nodes reporting state to server; should be less than
- # agent_down_time, best if it is half or less than agent_down_time
- # report_interval = 30
- # =========== end of items for agent management extension =====
- [keystone_authtoken]
- auth_uri = http://controller:5000/v2.0
- identity_uri = http://controller:35357
- admin_tenant_name = service
- admin_user = neutron
- admin_password = password
- #auth_host = 127.0.0.1
- #auth_port = 35357
- #auth_protocol = http
- #admin_tenant_name = %SERVICE_TENANT_NAME%
- #admin_user = %SERVICE_USER%
- #admin_password = %SERVICE_PASSWORD%
- [database]
- # This line MUST be changed to actually run the plugin.
- # Example:
- # connection = mysql://root:pass@127.0.0.1:3306/neutron
- # Replace 127.0.0.1 above with the IP address of the database used by the
- # main neutron server. (Leave it as is if the database runs on this host.)
- #connection = sqlite:////var/lib/neutron/neutron.sqlite
- connection = mysql://neutron:password@controller/neutron
- # NOTE: In deployment the [database] section and its connection attribute may
- # be set in the corresponding core plugin '.ini' file. However, it is suggested
- # to put the [database] section and its connection attribute in this
- # configuration file.
- # Database engine for which script will be generated when using offline
- # migration
- # engine =
- # The SQLAlchemy connection string used to connect to the slave database
- # slave_connection =
- # Database reconnection retry times - in event connectivity is lost
- # set to -1 implies an infinite retry count
- # max_retries = 10
- # Database reconnection interval in seconds - if the initial connection to the
- # database fails
- # retry_interval = 10
- # Minimum number of SQL connections to keep open in a pool
- # min_pool_size = 1
- # Maximum number of SQL connections to keep open in a pool
- # max_pool_size = 10
- # Timeout in seconds before idle sql connections are reaped
- # idle_timeout = 3600
- # If set, use this value for max_overflow with sqlalchemy
- # max_overflow = 20
- # Verbosity of SQL debugging information. 0=None, 100=Everything
- # connection_debug = 0
- # Add python stack traces to SQL as comment strings
- # connection_trace = False
- # If set, use this value for pool_timeout with sqlalchemy
- # pool_timeout = 10
- [service_providers]
- # Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
- # Must be in form:
- # service_provider=<service_type>:<name>:<driver>[:default]
- # List of allowed service types includes LOADBALANCER, FIREWALL, VPN
- # Combination of <service type> and <name> must be unique; <driver> must also be unique
- # This is multiline option, example for default provider:
- # service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
- # example of non-default provider:
- # service_provider=FIREWALL:name2:firewall_driver_path
- # --- Reference implementations ---
- service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
- service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
- # In order to activate Radware's lbaas driver you need to uncomment the next line.
- # If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
- # Otherwise comment the HA Proxy line
- # service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
- # uncomment the following line to make the 'netscaler' LBaaS provider available.
- # service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
- # Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
- # service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
- # Uncomment the line below to use Embrane heleos as Load Balancer service provider.
- # service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
- # Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
- #service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
- # Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
- # service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement