        transport_zone_config = self._convert_to_nsx_transport_zones(
self.cluster, net_data)
external = net_data.get(ext_net_extn.EXTERNAL)
# NOTE(salv-orlando): Pre-generating uuid for Neutron
# network. This will be removed once the network create operation
# becomes an asynchronous task
net_data['id'] = str(uuid.uuid4())
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
lswitch = switchlib.create_lswitch(
self.cluster, net_data['id'],
tenant_id, net_data.get('name'),
transport_zone_config,
shared=net_data.get(attr.SHARED))
with context.session.begin(subtransactions=True):
new_net = super(NsxPluginV2, self).create_network(context,
network)
# Process port security extension
self._process_network_port_security_create(
context, net_data, new_net)
# DB Operations for setting the network as external
self._process_l3_create(context, new_net, net_data)
# Process QoS queue extension
net_queue_id = net_data.get(qos.QUEUE)
if net_queue_id:
# Raises if not found
self.get_qos_queue(context, net_queue_id)
self._process_network_queue_mapping(
context, new_net, net_queue_id)
# Add mapping between neutron network and NSX switch
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
nsx_db.add_neutron_nsx_network_mapping(
context.session, new_net['id'],
lswitch['uuid'])
if (net_data.get(mpnet.SEGMENTS) and
isinstance(provider_type, bool)):
net_bindings = []
for tz in net_data[mpnet.SEGMENTS]:
net_bindings.append(nsx_db.add_network_binding(
context.session, new_net['id'],
tz.get(pnet.NETWORK_TYPE),
tz.get(pnet.PHYSICAL_NETWORK),
tz.get(pnet.SEGMENTATION_ID, 0)))
if provider_type:
nsx_db.set_multiprovider_network(context.session,
new_net['id'])
self._extend_network_dict_provider(context, new_net,
provider_type,
net_bindings)
self.handle_network_dhcp_access(context, new_net,
action='create_network')
return new_net
def delete_network(self, context, id):
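        # Flow: detach the NSX peers of any router interface ports on this
        # network, delete the network from the Neutron DB, then remove the
        # backing logical switches from the NSX backend (skipped for
        # external networks, which have no NSX counterpart).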
external = self._network_is_external(context, id)
# Before deleting ports, ensure the peer of a NSX logical
# port with a patch attachment is removed too
port_filter = {'network_id': [id],
'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]}
router_iface_ports = self.get_ports(context, filters=port_filter)
for port in router_iface_ports:
            nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
                context.session, self.cluster, port['id'])
# Before removing entry from Neutron DB, retrieve NSX switch
# identifiers for removing them from backend
if not external:
lswitch_ids = nsx_utils.get_nsx_switch_ids(
context.session, self.cluster, id)
super(NsxPluginV2, self).delete_network(context, id)
# clean up network owned ports
for port in router_iface_ports:
try:
if nsx_port_id:
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, port['device_id'])
routerlib.delete_peer_router_lport(self.cluster,
nsx_router_id,
nsx_switch_id,
nsx_port_id)
else:
LOG.warning(_("A nsx lport identifier was not found for "
"neutron port '%s'. Unable to remove "
"the peer router port for this switch port"),
port['id'])
except (TypeError, KeyError,
api_exc.NsxApiException,
api_exc.ResourceNotFound):
                # Do not raise because the issue may simply be that the
                # router has already been deleted, so there would be nothing
                # to do here
LOG.warning(_("Ignoring exception as this means the peer for "
"port '%s' has already been deleted."),
nsx_port_id)
# Do not go to NSX for external networks
if not external:
try:
switchlib.delete_networks(self.cluster, id, lswitch_ids)
LOG.debug(_("delete_network completed for tenant: %s"),
context.tenant_id)
except n_exc.NotFound:
LOG.warning(_("Did not found lswitch %s in NSX"), id)
self.handle_network_dhcp_access(context, id, action='delete_network')
def get_network(self, context, id, fields=None):
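        # Optionally refresh the operational status from the NSX backend
        # before building the response, depending on the sync options and
        # on whether the 'status' field was requested.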
with context.session.begin(subtransactions=True):
            # go to the plugin DB and fetch the network
network = self._get_network(context, id)
if (self.nsx_sync_opts.always_read_status or
fields and 'status' in fields):
# External networks are not backed by nsx lswitches
if not network.external:
# Perform explicit state synchronization
self._synchronizer.synchronize_network(context, network)
# Don't do field selection here otherwise we won't be able
# to add provider networks fields
net_result = self._make_network_dict(network)
self._extend_network_dict_provider(context, net_result)
return self._fields(net_result, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
filters = filters or {}
with context.session.begin(subtransactions=True):
networks = (
super(NsxPluginV2, self).get_networks(
context, filters, fields, sorts,
limit, marker, page_reverse))
for net in networks:
self._extend_network_dict_provider(context, net)
return [self._fields(network, fields) for network in networks]
def update_network(self, context, id, network):
pnet._raise_if_updates_provider_attributes(network['network'])
if network["network"].get("admin_state_up") is False:
raise NotImplementedError(_("admin_state_up=False networks "
"are not supported."))
with context.session.begin(subtransactions=True):
net = super(NsxPluginV2, self).update_network(context, id, network)
if psec.PORTSECURITY in network['network']:
self._process_network_port_security_update(
context, network['network'], net)
net_queue_id = network['network'].get(qos.QUEUE)
if net_queue_id:
self._delete_network_queue_mapping(context, id)
self._process_network_queue_mapping(context, net, net_queue_id)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
def create_port(self, context, port):
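        # Two-phase create: the port is first committed to the Neutron DB
        # within a transaction, then the NSX backend operation is attempted;
        # on backend failure the port is marked as ERROR or removed.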
# If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
# then we pass the port to the policy engine. The reason why we don't
# pass the value to the policy engine when the port is
# ATTR_NOT_SPECIFIED is for the case where a port is created on a
# shared network that is not owned by the tenant.
port_data = port['port']
# Set port status as 'DOWN'. This will be updated by backend sync.
port_data['status'] = constants.PORT_STATUS_DOWN
with context.session.begin(subtransactions=True):
# First we allocate port in neutron database
neutron_db = super(NsxPluginV2, self).create_port(context, port)
neutron_port_id = neutron_db['id']
# Update fields obtained from neutron db (eg: MAC address)
port["port"].update(neutron_db)
self.handle_port_metadata_access(context, neutron_db)
# port security extension checks
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, port_data)
port_data[psec.PORTSECURITY] = port_security
self._process_port_port_security_create(
context, port_data, neutron_db)
# allowed address pair checks
if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)):
if not port_security:
raise addr_pair.AddressPairAndPortSecurityRequired()
else:
self._process_create_allowed_address_pairs(
context, neutron_db,
port_data[addr_pair.ADDRESS_PAIRS])
else:
# remove ATTR_NOT_SPECIFIED
port_data[addr_pair.ADDRESS_PAIRS] = None
# security group extension checks
if port_security and has_ip:
self._ensure_default_security_group_on_port(context, port)
elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
port_data[ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._process_port_create_security_group(
context, port_data, port_data[ext_sg.SECURITYGROUPS])
# QoS extension checks
port_queue_id = self._check_for_queue_and_create(
context, port_data)
self._process_port_queue_mapping(
context, port_data, port_queue_id)
if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)):
self._create_mac_learning_state(context, port_data)
elif mac_ext.MAC_LEARNING in port_data:
port_data.pop(mac_ext.MAC_LEARNING)
LOG.debug(_("create_port completed on NSX for tenant "
"%(tenant_id)s: (%(id)s)"), port_data)
self._process_portbindings_create_and_update(context,
port['port'],
port_data)
# DB Operation is complete, perform NSX operation
try:
port_data = port['port'].copy()
port_create_func = self._port_drivers['create'].get(
port_data['device_owner'],
self._port_drivers['create']['default'])
port_create_func(context, port_data)
except n_exc.NotFound:
LOG.warning(_("Logical switch for network %s was not "
"found in NSX."), port_data['network_id'])
# Put port in error on neutron DB
with context.session.begin(subtransactions=True):
port = self._get_port(context, neutron_port_id)
port_data['status'] = constants.PORT_STATUS_ERROR
port['status'] = port_data['status']
context.session.add(port)
except Exception:
# Port must be removed from neutron DB
with excutils.save_and_reraise_exception():
LOG.error(_("Unable to create port or set port "
"attachment in NSX."))
with context.session.begin(subtransactions=True):
self._delete_port(context, neutron_port_id)
self.handle_port_dhcp_access(context, port_data, action='create_port')
return port_data
def update_port(self, context, id, port):
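        # Validate and persist extension data (port security, address pairs,
        # security groups, QoS, MAC learning) within the DB transaction,
        # then push the resulting state to the mapped NSX logical switch
        # port; if no mapping exists the port is marked as ERROR.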
changed_fixed_ips = 'fixed_ips' in port['port']
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
port)
has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
with context.session.begin(subtransactions=True):
ret_port = super(NsxPluginV2, self).update_port(
context, id, port)
# Save current mac learning state to check whether it's
# being updated or not
old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING)
# copy values over - except fixed_ips as
# they've already been processed
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
tenant_id = self._get_tenant_id_for_create(context, ret_port)
# populate port_security setting
if psec.PORTSECURITY not in port['port']:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id)
has_ip = self._ip_on_port(ret_port)
# validate port security and allowed address pairs
if not ret_port[psec.PORTSECURITY]:
# has address pairs in request
if has_addr_pairs:
raise addr_pair.AddressPairAndPortSecurityRequired()
elif not delete_addr_pairs:
# check if address pairs are in db
ret_port[addr_pair.ADDRESS_PAIRS] = (
self.get_allowed_address_pairs(context, id))
if ret_port[addr_pair.ADDRESS_PAIRS]:
raise addr_pair.AddressPairAndPortSecurityRequired()
if (delete_addr_pairs or has_addr_pairs):
                # delete address pairs and re-add them
self._delete_allowed_address_pairs(context, id)
self._process_create_allowed_address_pairs(
context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS])
elif changed_fixed_ips:
self._check_fixed_ips_and_address_pairs_no_overlap(context,
ret_port)
            # Check whether security groups were updated (added/modified);
            # security groups require port security and an IP on the port
if not (has_ip and ret_port[psec.PORTSECURITY]):
if has_security_groups:
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Update did not have security groups passed in. Check
# that port does not have any security groups already on it.
filters = {'port_id': [id]}
security_groups = (
super(NsxPluginV2, self)._get_port_security_group_bindings(
context, filters)
)
if security_groups and not delete_security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
if (delete_security_groups or has_security_groups):
                # delete the port binding and re-add it with the new rules.
self._delete_port_security_group_bindings(context, id)
sgids = self._get_security_groups_on_port(context, port)
self._process_port_create_security_group(context, ret_port,
sgids)
if psec.PORTSECURITY in port['port']:
self._process_port_port_security_update(
context, port['port'], ret_port)
port_queue_id = self._check_for_queue_and_create(
context, ret_port)
# Populate the mac learning attribute
new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING)
if (new_mac_learning_state is not None and
old_mac_learning_state != new_mac_learning_state):
self._update_mac_learning_state(context, id,
new_mac_learning_state)
ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state
self._delete_port_queue_mapping(context, ret_port['id'])
self._process_port_queue_mapping(context, ret_port,
port_queue_id)
LOG.debug(_("Updating port: %s"), port)
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, id)
# Convert Neutron security groups identifiers into NSX security
# profiles identifiers
nsx_sec_profile_ids = [
nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, neutron_sg_id) for
neutron_sg_id in (ret_port[ext_sg.SECURITYGROUPS] or [])]
if nsx_port_id:
try:
switchlib.update_port(
self.cluster, nsx_switch_id, nsx_port_id,
id, tenant_id,
ret_port['name'],
ret_port['device_id'],
ret_port['admin_state_up'],
ret_port['mac_address'],
ret_port['fixed_ips'],
ret_port[psec.PORTSECURITY],
nsx_sec_profile_ids,
ret_port[qos.QUEUE],
ret_port.get(mac_ext.MAC_LEARNING),
ret_port.get(addr_pair.ADDRESS_PAIRS))
# Update the port status from nsx. If we fail here hide it
# since the port was successfully updated but we were not
# able to retrieve the status.
ret_port['status'] = switchlib.get_port_status(
self.cluster, nsx_switch_id,
nsx_port_id)
# FIXME(arosen) improve exception handling.
except Exception:
ret_port['status'] = constants.PORT_STATUS_ERROR
LOG.exception(_("Unable to update port id: %s."),
nsx_port_id)
# If nsx_port_id is not in database or in nsx put in error state.
else:
ret_port['status'] = constants.PORT_STATUS_ERROR
self._process_portbindings_create_and_update(context,
port['port'],
ret_port)
return ret_port
def delete_port(self, context, id, l3_port_check=True,
nw_gw_port_check=True):
"""Deletes a port on a specified Virtual Network.
If the port contains a remote interface attachment, the remote
interface is first un-plugged and then the port is deleted.
:returns: None
:raises: exception.PortInUse
:raises: exception.PortNotFound
:raises: exception.NetworkNotFound
"""
        # if needed, check to see if this is a port owned by
        # an l3 router. If so, we should prevent deletion here
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
neutron_db_port = self.get_port(context, id)
# Perform the same check for ports owned by layer-2 gateways
if nw_gw_port_check:
self.prevent_network_gateway_port_deletion(context,
neutron_db_port)
port_delete_func = self._port_drivers['delete'].get(
neutron_db_port['device_owner'],
self._port_drivers['delete']['default'])
port_delete_func(context, neutron_db_port)
self.disassociate_floatingips(context, id)
with context.session.begin(subtransactions=True):
queue = self._get_port_queue_bindings(context, {'port_id': [id]})
# metadata_dhcp_host_route
self.handle_port_metadata_access(
context, neutron_db_port, is_delete=True)
super(NsxPluginV2, self).delete_port(context, id)
# Delete qos queue if possible
if queue:
self.delete_qos_queue(context, queue[0]['queue_id'], False)
self.handle_port_dhcp_access(
context, neutron_db_port, action='delete_port')
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
if (self.nsx_sync_opts.always_read_status or
fields and 'status' in fields):
# Perform explicit state synchronization
db_port = self._get_port(context, id)
self._synchronizer.synchronize_port(
context, db_port)
return self._make_port_dict(db_port, fields)
else:
return super(NsxPluginV2, self).get_port(context, id, fields)
def get_router(self, context, id, fields=None):
if (self.nsx_sync_opts.always_read_status or
fields and 'status' in fields):
db_router = self._get_router(context, id)
# Perform explicit state synchronization
self._synchronizer.synchronize_router(
context, db_router)
return self._make_router_dict(db_router, fields)
else:
return super(NsxPluginV2, self).get_router(context, id, fields)
def _create_lrouter(self, context, router, nexthop):
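        # Create the logical router on the NSX backend and attach an L3
        # gateway port to the default L3 gateway service; if the attachment
        # fails the router is removed from NSX before re-raising.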
tenant_id = self._get_tenant_id_for_create(context, router)
distributed = router.get('distributed')
try:
lrouter = routerlib.create_lrouter(
self.cluster, router['id'],
tenant_id, router['name'], nexthop,
distributed=attr.is_attr_set(distributed) and distributed)
except nsx_exc.InvalidVersion:
msg = _("Cannot create a distributed router with the NSX "
"platform currently in execution. Please, try "
"without specifying the 'distributed' attribute.")
LOG.exception(msg)
raise n_exc.BadRequest(resource='router', msg=msg)
except api_exc.NsxApiException:
err_msg = _("Unable to create logical router on NSX Platform")
LOG.exception(err_msg)
raise nsx_exc.NsxPluginException(err_msg=err_msg)
# Create the port here - and update it later if we have gw_info
try:
self._create_and_attach_router_port(
self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True},
"L3GatewayAttachment",
self.cluster.default_l3_gw_service_uuid)
except nsx_exc.NsxPluginException:
LOG.exception(_("Unable to create L3GW port on logical router "
"%(router_uuid)s. Verify Default Layer-3 Gateway "
"service %(def_l3_gw_svc)s id is correct"),
{'router_uuid': lrouter['uuid'],
'def_l3_gw_svc':
self.cluster.default_l3_gw_service_uuid})
# Try and remove logical router from NSX
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
            # Return the user a 500 with an appropriate message
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to create router %s on NSX backend") %
router['id']))
lrouter['status'] = plugin_const.ACTIVE
return lrouter
def create_router(self, context, router):
# NOTE(salvatore-orlando): We completely override this method in
# order to be able to use the NSX ID as Neutron ID
# TODO(salvatore-orlando): Propose upstream patch for allowing
# 3rd parties to specify IDs as we do with l2 plugin
r = router['router']
has_gw_info = False
tenant_id = self._get_tenant_id_for_create(context, r)
# default value to set - nsx wants it (even if we don't have it)
nexthop = NSX_DEFAULT_NEXTHOP
        # if external gateway info is set, then configure nexthop to
        # default external gateway
if 'external_gateway_info' in r and r.get('external_gateway_info'):
has_gw_info = True
gw_info = r['external_gateway_info']
del r['external_gateway_info']
# The following DB read will be performed again when updating
# gateway info. This is not great, but still better than
# creating NSX router here and updating it later
network_id = (gw_info.get('network_id', None) if gw_info
else None)
if network_id:
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
nexthop = ext_subnet.gateway_ip
# NOTE(salv-orlando): Pre-generating uuid for Neutron
# router. This will be removed once the router create operation
# becomes an asynchronous task
neutron_router_id = str(uuid.uuid4())
r['id'] = neutron_router_id
lrouter = self._create_lrouter(context, r, nexthop)
# Update 'distributed' with value returned from NSX
# This will be useful for setting the value if the API request
# did not specify any value for the 'distributed' attribute
# Platforms older than 3.x do not support the attribute
r['distributed'] = lrouter.get('distributed', False)
# TODO(salv-orlando): Deal with backend object removal in case
# of db failures
with context.session.begin(subtransactions=True):
# Transaction nesting is needed to avoid foreign key violations
# when processing the distributed router binding
with context.session.begin(subtransactions=True):
router_db = l3_db.Router(id=neutron_router_id,
tenant_id=tenant_id,
name=r['name'],
admin_state_up=r['admin_state_up'],
status=lrouter['status'])
context.session.add(router_db)
self._process_nsx_router_create(context, router_db, r)
# Ensure neutron router is moved into the transaction's buffer
context.session.flush()
# Add mapping between neutron and nsx identifiers
nsx_db.add_neutron_nsx_router_mapping(
context.session, router_db['id'], lrouter['uuid'])
if has_gw_info:
            # NOTE(salv-orlando): This operation has been moved out of the
            # database transaction because it performs several NSX queries,
            # which would increase the risk of deadlocks between eventlet
            # and sqlalchemy operations if kept inside the transaction.
# Set external gateway and remove router in case of failure
try:
self._update_router_gw_info(context, router_db['id'], gw_info)
except (n_exc.NeutronException, api_exc.NsxApiException):
with excutils.save_and_reraise_exception():
# As setting gateway failed, the router must be deleted
# in order to ensure atomicity
router_id = router_db['id']
LOG.warn(_("Failed to set gateway info for router being "
"created:%s - removing router"), router_id)
self.delete_router(context, router_id)
LOG.info(_("Create router failed while setting external "
"gateway. Router:%s has been removed from "
"DB and backend"),
router_id)
return self._make_router_dict(router_db)
def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
return routerlib.update_lrouter(
self.cluster, nsx_router_id, name,
nexthop, routes=routes)
def _update_lrouter_routes(self, context, router_id, routes):
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
routerlib.update_explicit_routes_lrouter(
self.cluster, nsx_router_id, routes)
def update_router(self, context, router_id, router):
# Either nexthop is updated or should be kept as it was before
r = router['router']
nexthop = None
if 'external_gateway_info' in r and r.get('external_gateway_info'):
gw_info = r['external_gateway_info']
# The following DB read will be performed again when updating
# gateway info. This is not great, but still better than
# creating NSX router here and updating it later
network_id = (gw_info.get('network_id', None) if gw_info
else None)
if network_id:
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
nexthop = ext_subnet.gateway_ip
try:
for route in r.get('routes', []):
if route['destination'] == '0.0.0.0/0':
msg = _("'routes' cannot contain route '0.0.0.0/0', "
"this must be updated through the default "
"gateway attribute")
raise n_exc.BadRequest(resource='router', msg=msg)
previous_routes = self._update_lrouter(
context, router_id, r.get('name'),
nexthop, routes=r.get('routes'))
# NOTE(salv-orlando): The exception handling below is not correct, but
# unfortunately nsxlib raises a neutron notfound exception when an
# object is not found in the underlying backend
except n_exc.NotFound:
# Put the router in ERROR status
with context.session.begin(subtransactions=True):
router_db = self._get_router(context, router_id)
router_db['status'] = constants.NET_STATUS_ERROR
raise nsx_exc.NsxPluginException(
err_msg=_("Logical router %s not found "
"on NSX Platform") % router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=_("Unable to update logical router on NSX Platform"))
except nsx_exc.InvalidVersion:
msg = _("Request cannot contain 'routes' with the NSX "
"platform currently in execution. Please, try "
"without specifying the static routes.")
LOG.exception(msg)
raise n_exc.BadRequest(resource='router', msg=msg)
try:
return super(NsxPluginV2, self).update_router(context,
router_id, router)
except (extraroute.InvalidRoutes,
extraroute.RouterInterfaceInUseByRoute,
extraroute.RoutesExhausted):
with excutils.save_and_reraise_exception():
# revert changes made to NSX
self._update_lrouter_routes(
context, router_id, previous_routes)
def _delete_lrouter(self, context, router_id, nsx_router_id):
# The neutron router id (router_id) is ignored in this routine,
# but used in plugins deriving from this one
routerlib.delete_lrouter(self.cluster, nsx_router_id)
def delete_router(self, context, router_id):
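        # Pre-delete checks run first (floating IPs or interfaces still in
        # use), then the logical router is removed from the NSX backend and
        # its mapping dropped before deleting the router from the Neutron DB.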
with context.session.begin(subtransactions=True):
# TODO(salv-orlando): This call should have no effect on delete
# router, but if it does, it should not happen within a
# transaction, and it should be restored on rollback
self.handle_router_metadata_access(
context, router_id, interface=None)
# Pre-delete checks
# NOTE(salv-orlando): These checks will be repeated anyway when
# calling the superclass. This is wasteful, but is the simplest
# way of ensuring a consistent removal of the router both in
# the neutron Database and in the NSX backend.
# TODO(salv-orlando): split pre-delete checks and actual
# deletion in superclass.
# Ensure that the router is not used
fips = self.get_floatingips_count(
context.elevated(), filters={'router_id': [router_id]})
if fips:
raise l3.RouterInUse(router_id=router_id)
device_filter = {'device_id': [router_id],
'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
ports = self._core_plugin.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
# It is safe to remove the router from the database, so remove it
# from the backend
try:
self._delete_lrouter(context, router_id, nsx_router_id)
except n_exc.NotFound:
# This is not a fatal error, but needs to be logged
LOG.warning(_("Logical router '%s' not found "
"on NSX Platform"), router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to delete logical router '%s' "
"on NSX Platform") % nsx_router_id))
# Remove the NSX mapping first in order to ensure a mapping to
# a non-existent NSX router is not left in the DB in case of
# failure while removing the router from the neutron DB
try:
nsx_db.delete_neutron_nsx_router_mapping(
context.session, router_id)
except db_exc.DBError as d_exc:
# Do not make this error fatal
LOG.warn(_("Unable to remove NSX mapping for Neutron router "
"%(router_id)s because of the following exception:"
"%(d_exc)s"), {'router_id': router_id,
'd_exc': str(d_exc)})
# Perform the actual delete on the Neutron DB
super(NsxPluginV2, self).delete_router(context, router_id)
def _add_subnet_snat_rule(self, context, router, subnet):
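        # The rule order is NSX_EXTGW_NAT_RULES_ORDER minus the subnet prefix
        # length, presumably so that more specific subnets get a lower order
        # value and their rules are evaluated first on the NSX backend.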
gw_port = router.gw_port
if gw_port and router.enable_snat:
            # There is a chance gw_port might have multiple IPs
# In that case we will consider only the first one
if gw_port.get('fixed_ips'):
snat_ip = gw_port['fixed_ips'][0]['ip_address']
cidr_prefix = int(subnet['cidr'].split('/')[1])
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router['id'])
routerlib.create_lrouter_snat_rule(
self.cluster, nsx_router_id, snat_ip, snat_ip,
order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix,
match_criteria={'source_ip_addresses': subnet['cidr']})
def _delete_subnet_snat_rule(self, context, router, subnet):
# Remove SNAT rule if external gateway is configured
if router.gw_port:
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router['id'])
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "SourceNatRule",
max_num_expected=1, min_num_expected=1,
source_ip_addresses=subnet['cidr'])
def add_router_interface(self, context, router_id, interface_info):
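        # Besides wiring the interface via the base class, this installs the
        # subnet SNAT/no-SNAT rules on the NSX logical router and refreshes
        # metadata access for the router.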
# When adding interface by port_id we need to create the
# peer port on the nsx logical router in this routine
port_id = interface_info.get('port_id')
router_iface_info = super(NsxPluginV2, self).add_router_interface(
context, router_id, interface_info)
# router_iface_info will always have a subnet_id attribute
subnet_id = router_iface_info['subnet_id']
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
if port_id:
port_data = self._get_port(context, port_id)
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_id)
# Unplug current attachment from lswitch port
switchlib.plug_vif_interface(self.cluster, nsx_switch_id,
nsx_port_id, "NoAttachment")
# Create logical router port and plug patch attachment
self._create_and_attach_router_port(
self.cluster, context, nsx_router_id, port_data,
"PatchAttachment", nsx_port_id, subnet_ids=[subnet_id])
subnet = self._get_subnet(context, subnet_id)
# If there is an external gateway we need to configure the SNAT rule.
# Fetch router from DB
router = self._get_router(context, router_id)
self._add_subnet_snat_rule(context, router, subnet)
routerlib.create_lrouter_nosnat_rule(
self.cluster, nsx_router_id,
order=NSX_NOSNAT_RULES_ORDER,
match_criteria={'destination_ip_addresses': subnet['cidr']})
# Ensure the NSX logical router has a connection to a 'metadata access'
# network (with a proxy listening on its DHCP port), by creating it
# if needed.
self.handle_router_metadata_access(
context, router_id, interface=router_iface_info)
LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s "
"and router:%(router_id)s"),
{'subnet_id': subnet_id, 'router_id': router_id})
return router_iface_info
def remove_router_interface(self, context, router_id, interface_info):
        # The code below is duplicated from the base class, but it comes in
        # handy as we need to retrieve the router port id before removing
        # the port
subnet = None
subnet_id = None
if 'port_id' in interface_info:
port_id = interface_info['port_id']
            # find subnet_id - it is needed for removing the SNAT rule
port = self._get_port(context, port_id)
if port.get('fixed_ips'):
subnet_id = port['fixed_ips'][0]['subnet_id']
if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and
port['device_id'] == router_id):
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
elif 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
subnet = self._get_subnet(context, subnet_id)
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
port_id = p['id']
break
else:
raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
subnet_id=subnet_id)
# Finally remove the data from the Neutron DB
# This will also destroy the port on the logical switch
info = super(NsxPluginV2, self).remove_router_interface(
context, router_id, interface_info)
try:
# Ensure the connection to the 'metadata access network'
            # is removed (with the network) if this is the last subnet
# on the router
self.handle_router_metadata_access(
context, router_id, interface=info)
if not subnet:
subnet = self._get_subnet(context, subnet_id)
router = self._get_router(context, router_id)
            # If the router has enable_snat set to False there are no
            # snat rules to delete.
if router.enable_snat:
self._delete_subnet_snat_rule(context, router, subnet)
# Relax the minimum expected number as the nosnat rules
# do not exist in 2.x deployments
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "NoSourceNatRule",
max_num_expected=1, min_num_expected=0,
destination_ip_addresses=subnet['cidr'])
except n_exc.NotFound:
LOG.error(_("Logical router resource %s not found "
"on NSX platform") % router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to update logical router"
"on NSX Platform")))
return info
def _retrieve_and_delete_nat_rules(self, context, floating_ip_address,
internal_ip, nsx_router_id,
min_num_rules_expected=0):
"""Finds and removes NAT rules from a NSX router."""
# NOTE(salv-orlando): The context parameter is ignored in this method
# but used by derived classes
try:
# Remove DNAT rule for the floating IP
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "DestinationNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
destination_ip_addresses=floating_ip_address)
# Remove SNAT rules for the floating IP
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "SourceNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
source_ip_addresses=internal_ip)
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "SourceNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
destination_ip_addresses=internal_ip)
except api_exc.NsxApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_("An error occurred while removing NAT rules "
"on the NSX platform for floating ip:%s"),
floating_ip_address)
except nsx_exc.NatRuleMismatch:
# Do not surface to the user
LOG.warning(_("An incorrect number of matching NAT rules "
"was found on the NSX platform"))
def _remove_floatingip_address(self, context, fip_db):
# Remove floating IP address from logical router port
# Fetch logical port of router's external gateway
router_id = fip_db.router_id
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
nsx_gw_port_id = routerlib.find_router_gw_port(
context, self.cluster, nsx_router_id)['uuid']
ext_neutron_port_db = self._get_port(context.elevated(),
fip_db.floating_port_id)
nsx_floating_ips = self._build_ip_address_list(
context.elevated(), ext_neutron_port_db['fixed_ips'])
routerlib.update_lrouter_port_ips(self.cluster,
nsx_router_id,
nsx_gw_port_id,
ips_to_add=[],
ips_to_remove=nsx_floating_ips)
def _get_fip_assoc_data(self, context, fip, floatingip_db):
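        # Reject a fixed_ip_address supplied without a port_id, and ensure
        # the (port, internal IP) pair is not already associated with another
        # floating IP on the same external network.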
if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and
not ('port_id' in fip and fip['port_id'])):
msg = _("fixed_ip_address cannot be specified without a port_id")
raise n_exc.BadRequest(resource='floatingip', msg=msg)
port_id = internal_ip = router_id = None
if 'port_id' in fip and fip['port_id']:
fip_qry = context.session.query(l3_db.FloatingIP)
port_id, internal_ip, router_id = self.get_assoc_data(
context,
fip,
floatingip_db['floating_network_id'])
try:
fip_qry.filter_by(
fixed_port_id=fip['port_id'],
floating_network_id=floatingip_db['floating_network_id'],
fixed_ip_address=internal_ip).one()
raise l3.FloatingIPPortAlreadyAssociated(
port_id=fip['port_id'],
fip_id=floatingip_db['id'],
floating_ip_address=floatingip_db['floating_ip_address'],
fixed_ip=floatingip_db['fixed_ip_address'],
net_id=floatingip_db['floating_network_id'])
except sa_exc.NoResultFound:
pass
return (port_id, internal_ip, router_id)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
"""Update floating IP association data.
Overrides method from base class.
The method is augmented for creating NAT rules in the process.
"""
# Store router currently serving the floating IP
old_router_id = floatingip_db.router_id
port_id, internal_ip, router_id = self._get_fip_assoc_data(
context, fip, floatingip_db)
floating_ip = floatingip_db['floating_ip_address']
# If there's no association router_id will be None
if router_id:
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
self._retrieve_and_delete_nat_rules(
context, floating_ip, internal_ip, nsx_router_id)
        # Fetch logical port of router's external gateway
nsx_floating_ips = self._build_ip_address_list(
context.elevated(), external_port['fixed_ips'])
floating_ip = floatingip_db['floating_ip_address']
# Retrieve and delete existing NAT rules, if any
if old_router_id:
nsx_old_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, old_router_id)
# Retrieve the current internal ip
_p, _s, old_internal_ip = self._internal_fip_assoc_data(
context, {'id': floatingip_db.id,
'port_id': floatingip_db.fixed_port_id,
'fixed_ip_address': floatingip_db.fixed_ip_address,
'tenant_id': floatingip_db.tenant_id})
nsx_gw_port_id = routerlib.find_router_gw_port(
context, self.cluster, nsx_old_router_id)['uuid']
self._retrieve_and_delete_nat_rules(
context, floating_ip, old_internal_ip, nsx_old_router_id)
routerlib.update_lrouter_port_ips(
self.cluster, nsx_old_router_id, nsx_gw_port_id,
ips_to_add=[], ips_to_remove=nsx_floating_ips)
if router_id:
nsx_gw_port_id = routerlib.find_router_gw_port(
context, self.cluster, nsx_router_id)['uuid']
# Re-create NAT rules only if a port id is specified
if fip.get('port_id'):
try:
# Setup DNAT rules for the floating IP
routerlib.create_lrouter_dnat_rule(
self.cluster, nsx_router_id, internal_ip,
order=NSX_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'destination_ip_addresses':
floating_ip})
# Setup SNAT rules for the floating IP
# Create a SNAT rule for enabling connectivity to the
# floating IP from the same network as the internal port
# Find subnet id for internal_ip from fixed_ips
internal_port = self._get_port(context, port_id)
                    # Checks not needed on the statements below since
                    # otherwise _internal_fip_assoc_data would have raised
subnet_ids = [ip['subnet_id'] for ip in
internal_port['fixed_ips'] if
ip['ip_address'] == internal_ip]
internal_subnet_cidr = self._build_ip_address_list(
context, internal_port['fixed_ips'],
subnet_ids=subnet_ids)[0]
routerlib.create_lrouter_snat_rule(
self.cluster, nsx_router_id, floating_ip, floating_ip,
order=NSX_NOSNAT_RULES_ORDER - 1,
match_criteria={'source_ip_addresses':
internal_subnet_cidr,
'destination_ip_addresses':
internal_ip})
                    # Set up a SNAT rule so that the source IP of a packet
                    # using the floating IP is the floating IP itself.
routerlib.create_lrouter_snat_rule(
self.cluster, nsx_router_id, floating_ip, floating_ip,
order=NSX_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'source_ip_addresses': internal_ip})
# Add Floating IP address to router_port
routerlib.update_lrouter_port_ips(
self.cluster, nsx_router_id, nsx_gw_port_id,
ips_to_add=nsx_floating_ips, ips_to_remove=[])
except api_exc.NsxApiException:
LOG.exception(_("An error occurred while creating NAT "
"rules on the NSX platform for floating "
"ip:%(floating_ip)s mapped to "
"internal ip:%(internal_ip)s"),
{'floating_ip': floating_ip,
'internal_ip': internal_ip})
msg = _("Failed to update NAT rules for floatingip update")
raise nsx_exc.NsxPluginException(err_msg=msg)
floatingip_db.update({'fixed_ip_address': internal_ip,
'fixed_port_id': port_id,
'router_id': router_id})
def delete_floatingip(self, context, id):
fip_db = self._get_floatingip(context, id)
# Check whether the floating ip is associated or not
if fip_db.fixed_port_id:
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, fip_db.router_id)
self._retrieve_and_delete_nat_rules(context,
fip_db.floating_ip_address,
fip_db.fixed_ip_address,
nsx_router_id,
min_num_rules_expected=1)
# Remove floating IP address from logical router port
self._remove_floatingip_address(context, fip_db)
return super(NsxPluginV2, self).delete_floatingip(context, id)
def disassociate_floatingips(self, context, port_id):
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_db = fip_qry.filter_by(fixed_port_id=port_id).one()
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, fip_db.router_id)
self._retrieve_and_delete_nat_rules(context,
fip_db.floating_ip_address,
fip_db.fixed_ip_address,
nsx_router_id,
min_num_rules_expected=1)
self._remove_floatingip_address(context, fip_db)
except sa_exc.NoResultFound:
LOG.debug(_("The port '%s' is not associated with floating IPs"),
port_id)
except n_exc.NotFound:
LOG.warning(_("Nat rules not found in nsx for port: %s"), id)
super(NsxPluginV2, self).disassociate_floatingips(context, port_id)
def create_network_gateway(self, context, network_gateway):
"""Create a layer-2 network gateway.
        Create the gateway service on the NSX platform and the corresponding
        data structures in the Neutron database.
"""
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Need to re-do authZ checks here in order to avoid creation on NSX
gw_data = network_gateway[networkgw.GATEWAY_RESOURCE_NAME]
tenant_id = self._get_tenant_id_for_create(context, gw_data)
devices = gw_data['devices']
# Populate default physical network where not specified
for device in devices:
if not device.get('interface_name'):
device['interface_name'] = self.cluster.default_interface_name
try:
# Replace Neutron device identifiers with NSX identifiers
# TODO(salv-orlando): Make this operation more efficient doing a
# single DB query for all devices
nsx_devices = [{'id': self._get_nsx_device_id(context,
device['id']),
'interface_name': device['interface_name']} for
device in devices]
nsx_res = l2gwlib.create_l2_gw_service(
self.cluster, tenant_id, gw_data['name'], nsx_devices)
nsx_uuid = nsx_res.get('uuid')
except api_exc.Conflict:
raise nsx_exc.L2GatewayAlreadyInUse(gateway=gw_data['name'])
except api_exc.NsxApiException:
err_msg = _("Unable to create l2_gw_service for: %s") % gw_data
LOG.exception(err_msg)
raise nsx_exc.NsxPluginException(err_msg=err_msg)
gw_data['id'] = nsx_uuid
return super(NsxPluginV2, self).create_network_gateway(
context, network_gateway)
def delete_network_gateway(self, context, gateway_id):
"""Remove a layer-2 network gateway.
        Remove the gateway service from the NSX platform and the
        corresponding data structures in the Neutron database.
"""
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
with context.session.begin(subtransactions=True):
try:
super(NsxPluginV2, self).delete_network_gateway(
context, gateway_id)
l2gwlib.delete_l2_gw_service(self.cluster, gateway_id)
except api_exc.ResourceNotFound:
# Do not cause a 500 to be returned to the user if
# the corresponding NSX resource does not exist
LOG.exception(_("Unable to remove gateway service from "
"NSX plaform - the resource was not found"))
def get_network_gateway(self, context, id, fields=None):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NsxPluginV2, self).get_network_gateway(context,
id, fields)
def get_network_gateways(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Ensure the tenant_id attribute is populated on returned gateways
return super(NsxPluginV2, self).get_network_gateways(
context, filters, fields, sorts, limit, marker, page_reverse)
def update_network_gateway(self, context, id, network_gateway):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Update gateway on backend when there's a name change
name = network_gateway[networkgw.GATEWAY_RESOURCE_NAME].get('name')
if name:
try:
l2gwlib.update_l2_gw_service(self.cluster, id, name)
except api_exc.NsxApiException:
# Consider backend failures as non-fatal, but still warn
# because this might indicate something dodgy is going on
LOG.warn(_("Unable to update name on NSX backend "
"for network gateway: %s"), id)
return super(NsxPluginV2, self).update_network_gateway(
context, id, network_gateway)
def connect_network(self, context, network_gateway_id,
network_mapping_info):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
try:
return super(NsxPluginV2, self).connect_network(
context, network_gateway_id, network_mapping_info)
except api_exc.Conflict:
raise nsx_exc.L2GatewayAlreadyInUse(gateway=network_gateway_id)
def disconnect_network(self, context, network_gateway_id,
network_mapping_info):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NsxPluginV2, self).disconnect_network(
context, network_gateway_id, network_mapping_info)
def _get_nsx_device_id(self, context, device_id):
return self._get_gateway_device(context, device_id)['nsx_id']
def _rollback_gw_device(self, context, device_id,
gw_data=None, new_status=None,
is_create=False, log_level=logging.ERROR):
LOG.log(log_level,
_("Rolling back database changes for gateway device %s "
"because of an error in the NSX backend"), device_id)
with context.session.begin(subtransactions=True):
query = self._model_query(
context, networkgw_db.NetworkGatewayDevice).filter(
networkgw_db.NetworkGatewayDevice.id == device_id)
if is_create:
query.delete(synchronize_session=False)
else:
super(NsxPluginV2, self).update_gateway_device(
context, device_id,
{networkgw.DEVICE_RESOURCE_NAME: gw_data})
if new_status:
query.update({'status': new_status},
synchronize_session=False)
# TODO(salv-orlando): Handlers for Gateway device operations should be
# moved into the appropriate nsx_handlers package once the code for the
# blueprint nsx-async-backend-communication merges
def create_gateway_device_handler(self, context, gateway_device,
client_certificate):
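        # Create the transport node on the NSX backend, fetch its operational
        # status with a second API call, then store the NSX identifier and
        # status on the Neutron gateway device row; the row is rolled back
        # if the backend call fails.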
neutron_id = gateway_device['id']
try:
nsx_res = l2gwlib.create_gateway_device(
self.cluster,
gateway_device['tenant_id'],
gateway_device['name'],
neutron_id,
self.cluster.default_tz_uuid,
gateway_device['connector_type'],
gateway_device['connector_ip'],
client_certificate)
# Fetch status (it needs another NSX API call)
device_status = nsx_utils.get_nsx_device_status(self.cluster,
nsx_res['uuid'])
# set NSX GW device in neutron database and update status
with context.session.begin(subtransactions=True):
query = self._model_query(
context, networkgw_db.NetworkGatewayDevice).filter(
networkgw_db.NetworkGatewayDevice.id == neutron_id)
query.update({'status': device_status,
'nsx_id': nsx_res['uuid']},
synchronize_session=False)
LOG.debug(_("Neutron gateway device: %(neutron_id)s; "
"NSX transport node identifier: %(nsx_id)s; "
"Operational status: %(status)s."),
{'neutron_id': neutron_id,
'nsx_id': nsx_res['uuid'],
'status': device_status})
return device_status
except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException):
with excutils.save_and_reraise_exception():
self._rollback_gw_device(context, neutron_id, is_create=True)
def update_gateway_device_handler(self, context, gateway_device,
old_gateway_device_data,
client_certificate):
nsx_id = gateway_device['nsx_id']
neutron_id = gateway_device['id']
try:
l2gwlib.update_gateway_device(
self.cluster,
nsx_id,
gateway_device['tenant_id'],
gateway_device['name'],
neutron_id,
self.cluster.default_tz_uuid,
gateway_device['connector_type'],
gateway_device['connector_ip'],
client_certificate)
# Fetch status (it needs another NSX API call)
device_status = nsx_utils.get_nsx_device_status(self.cluster,
nsx_id)
# update status
with context.session.begin(subtransactions=True):
query = self._model_query(
context, networkgw_db.NetworkGatewayDevice).filter(
networkgw_db.NetworkGatewayDevice.id == neutron_id)
query.update({'status': device_status},
synchronize_session=False)
LOG.debug(_("Neutron gateway device: %(neutron_id)s; "
"NSX transport node identifier: %(nsx_id)s; "
"Operational status: %(status)s."),
{'neutron_id': neutron_id,
'nsx_id': nsx_id,
'status': device_status})
return device_status
except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException):
with excutils.save_and_reraise_exception():
self._rollback_gw_device(context, neutron_id,
gw_data=old_gateway_device_data)
except n_exc.NotFound:
# The gateway device was probably deleted in the backend.
# The DB change should be rolled back and the status must
# be put in error
with excutils.save_and_reraise_exception():
self._rollback_gw_device(context, neutron_id,
gw_data=old_gateway_device_data,
new_status=networkgw_db.ERROR)
def get_gateway_device(self, context, device_id, fields=None):
# Get device from database
gw_device = super(NsxPluginV2, self).get_gateway_device(
context, device_id, fields, include_nsx_id=True)
# Fetch status from NSX
nsx_id = gw_device['nsx_id']
device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id)
# TODO(salv-orlando): Asynchronous sync for gateway device status
# Update status in database
with context.session.begin(subtransactions=True):
query = self._model_query(
context, networkgw_db.NetworkGatewayDevice).filter(
networkgw_db.NetworkGatewayDevice.id == device_id)
query.update({'status': device_status},
synchronize_session=False)
gw_device['status'] = device_status
return gw_device
def get_gateway_devices(self, context, filters=None, fields=None):
# Get devices from database
devices = super(NsxPluginV2, self).get_gateway_devices(
context, filters, fields, include_nsx_id=True)
        # Fetch operational status from NSX, filter by tenant tag
# TODO(salv-orlando): Asynchronous sync for gateway device status
tenant_id = context.tenant_id if not context.is_admin else None
nsx_statuses = nsx_utils.get_nsx_device_statuses(self.cluster,
tenant_id)
# Update statuses in database
with context.session.begin(subtransactions=True):
for device in devices:
new_status = nsx_statuses.get(device['nsx_id'])
if new_status:
device['status'] = new_status
return devices
def create_gateway_device(self, context, gateway_device):
# NOTE(salv-orlando): client-certificate will not be stored
# in the database
device_data = gateway_device[networkgw.DEVICE_RESOURCE_NAME]
client_certificate = device_data.pop('client_certificate')
gw_device = super(NsxPluginV2, self).create_gateway_device(
context, gateway_device)
# DB operation was successful, perform NSX operation
gw_device['status'] = self.create_gateway_device_handler(
context, gw_device, client_certificate)
return gw_device
def update_gateway_device(self, context, device_id,
gateway_device):
# NOTE(salv-orlando): client-certificate will not be stored
# in the database
client_certificate = (
gateway_device[networkgw.DEVICE_RESOURCE_NAME].pop(
'client_certificate', None))
        # Retrieve current state from DB in case a rollback should be needed
old_gw_device_data = super(NsxPluginV2, self).get_gateway_device(
context, device_id, include_nsx_id=True)
gw_device = super(NsxPluginV2, self).update_gateway_device(
context, device_id, gateway_device, include_nsx_id=True)
# DB operation was successful, perform NSX operation
gw_device['status'] = self.update_gateway_device_handler(
context, gw_device, old_gw_device_data, client_certificate)
gw_device.pop('nsx_id')
return gw_device
def delete_gateway_device(self, context, device_id):
nsx_device_id = self._get_nsx_device_id(context, device_id)
super(NsxPluginV2, self).delete_gateway_device(
context, device_id)
        # DB operation was successful, perform NSX operation
# TODO(salv-orlando): State consistency with neutron DB
# should be ensured even in case of backend failures
try:
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
except n_exc.NotFound:
LOG.warn(_("Removal of gateway device: %(neutron_id)s failed on "
"NSX backend (NSX id:%(nsx_id)s) because the NSX "
"resource was not found"),
{'neutron_id': device_id, 'nsx_id': nsx_device_id})
except api_exc.NsxApiException:
with excutils.save_and_reraise_exception():
# In this case a 500 should be returned
LOG.exception(_("Removal of gateway device: %(neutron_id)s "
"failed on NSX backend (NSX id:%(nsx_id)s). "
"Neutron and NSX states have diverged."),
{'neutron_id': device_id,
'nsx_id': nsx_device_id})
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
        If default_sg is true, we are creating a default security group
        and do not need to check whether one already exists.
"""
s = security_group.get('security_group')
tenant_id = self._get_tenant_id_for_create(context, s)
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
# NOTE(salv-orlando): Pre-generating Neutron ID for security group.
neutron_id = str(uuid.uuid4())
nsx_secgroup = secgrouplib.create_security_profile(
self.cluster, neutron_id, tenant_id, s)
with context.session.begin(subtransactions=True):
s['id'] = neutron_id
sec_group = super(NsxPluginV2, self).create_security_group(
context, security_group, default_sg)
context.session.flush()
# Add mapping between neutron and nsx identifiers
nsx_db.add_neutron_nsx_security_group_mapping(
context.session, neutron_id, nsx_secgroup['uuid'])
return sec_group
def update_security_group(self, context, secgroup_id, security_group):
secgroup = (super(NsxPluginV2, self).
update_security_group(context,
secgroup_id,
security_group))
if ('name' in security_group['security_group'] and
secgroup['name'] != 'default'):
nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, secgroup_id)
try:
name = security_group['security_group']['name']
secgrouplib.update_security_profile(
self.cluster, nsx_sec_profile_id, name)
except (n_exc.NotFound, api_exc.NsxApiException) as e:
# Reverting the DB change is not really worthwhile
# for a mismatch between names. It's the rules that
# we care about.
LOG.error(_('Error while updating security profile '
'%(uuid)s with name %(name)s: %(error)s.')
% {'uuid': secgroup_id, 'name': name, 'error': e})
return secgroup
def delete_security_group(self, context, security_group_id):
"""Delete a security group.
        :param security_group_id: security group to remove.
"""
with context.session.begin(subtransactions=True):
security_group = super(NsxPluginV2, self).get_security_group(
context, security_group_id)
if not security_group:
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
if security_group['name'] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
filters = {'security_group_id': [security_group['id']]}
if super(NsxPluginV2, self)._get_port_security_group_bindings(
context, filters):
raise ext_sg.SecurityGroupInUse(id=security_group['id'])
nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, security_group_id)
try:
secgrouplib.delete_security_profile(
self.cluster, nsx_sec_profile_id)
except n_exc.NotFound:
# The security profile was not found on the backend
# do not fail in this case.
LOG.warning(_("The NSX security profile %(sec_profile_id)s, "
"associated with the Neutron security group "
"%(sec_group_id)s was not found on the backend"),
{'sec_profile_id': nsx_sec_profile_id,
'sec_group_id': security_group_id})
except api_exc.NsxApiException:
# Raise and fail the operation, as there is a problem which
# prevented the sec group from being removed from the backend
LOG.exception(_("An exception occurred while removing the "
"NSX security profile %(sec_profile_id)s, "
"associated with Netron security group "
"%(sec_group_id)s"),
{'sec_profile_id': nsx_sec_profile_id,
'sec_group_id': security_group_id})
                raise nsx_exc.NsxPluginException(
                    err_msg=_("Unable to remove security group %s "
                              "from backend") % security_group['id'])
return super(NsxPluginV2, self).delete_security_group(
context, security_group_id)
def _validate_security_group_rules(self, context, rules):
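        # Reject port_range values for protocols that are not treated as
        # port-based before delegating to the base class validation.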
for rule in rules['security_group_rules']:
r = rule.get('security_group_rule')
port_based_proto = (self._get_ip_proto_number(r['protocol'])
in securitygroups_db.IP_PROTOCOL_MAP.values())
if (not port_based_proto and
(r['port_range_min'] is not None or
r['port_range_max'] is not None)):
msg = (_("Port values not valid for "
"protocol: %s") % r['protocol'])
raise n_exc.BadRequest(resource='security_group_rule',
msg=msg)
return super(NsxPluginV2, self)._validate_security_group_rules(context,
rules)
def create_security_group_rule(self, context, security_group_rule):
"""Create a single security group rule."""
bulk_rule = {'security_group_rules': [security_group_rule]}
return self.create_security_group_rule_bulk(context, bulk_rule)[0]
def create_security_group_rule_bulk(self, context, security_group_rule):
"""Create security group rules.
:param security_group_rule: list of rules to create
"""
s = security_group_rule.get('security_group_rules')
tenant_id = self._get_tenant_id_for_create(context, s)
        # TODO(arosen) is there any way we could avoid having the update of
# the security group rules in nsx outside of this transaction?
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(context, tenant_id)
security_group_id = self._validate_security_group_rules(
context, security_group_rule)
# Check to make sure security group exists
security_group = super(NsxPluginV2, self).get_security_group(
context, security_group_id)
if not security_group:
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
# Check for duplicate rules
self._check_for_duplicate_rules(context, s)
# gather all the existing security group rules since we need all
# of them to PUT to NSX.
existing_rules = self.get_security_group_rules(
context, {'security_group_id': [security_group['id']]})
combined_rules = sg_utils.merge_security_group_rules_with_current(
context.session, self.cluster, s, existing_rules)
nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, security_group_id)
secgrouplib.update_security_group_rules(self.cluster,
nsx_sec_profile_id,
combined_rules)
return super(
NsxPluginV2, self).create_security_group_rule_bulk_native(
context, security_group_rule)
def delete_security_group_rule(self, context, sgrid):
"""Delete a security group rule
        :param sgrid: security group rule id to remove.
"""
with context.session.begin(subtransactions=True):
# determine security profile id
security_group_rule = (
super(NsxPluginV2, self).get_security_group_rule(
context, sgrid))
if not security_group_rule:
raise ext_sg.SecurityGroupRuleNotFound(id=sgrid)
sgid = security_group_rule['security_group_id']
current_rules = self.get_security_group_rules(
context, {'security_group_id': [sgid]})
current_rules_nsx = sg_utils.get_security_group_rules_nsx_format(
context.session, self.cluster, current_rules, True)
sg_utils.remove_security_group_with_id_and_id_field(
current_rules_nsx, sgrid)
nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, sgid)
secgrouplib.update_security_group_rules(
self.cluster, nsx_sec_profile_id, current_rules_nsx)
return super(NsxPluginV2, self).delete_security_group_rule(context,
sgrid)
def create_qos_queue(self, context, qos_queue, check_policy=True):
q = qos_queue.get('qos_queue')
self._validate_qos_queue(context, q)
q['id'] = queuelib.create_lqueue(self.cluster, q)
return super(NsxPluginV2, self).create_qos_queue(context, qos_queue)
def delete_qos_queue(self, context, queue_id, raise_in_use=True):
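        # If any port is still bound to the queue, either raise or silently
        # return depending on raise_in_use; otherwise remove the queue from
        # both NSX and the Neutron DB.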
filters = {'queue_id': [queue_id]}
queues = self._get_port_queue_bindings(context, filters)
if queues:
if raise_in_use:
raise qos.QueueInUseByPort()
else:
return
queuelib.delete_lqueue(self.cluster, queue_id)
return super(NsxPluginV2, self).delete_qos_queue(context, queue_id)
# for backward compatibility
NvpPluginV2 = NsxPluginV2