OpenStack Study: create_volume.py



# Licensed under the Apache License, Version 2.0 (the "License"); you may

# not use this file except in compliance with the License. You may obtain

# a copy of the License at

#

# http://www.apache.org/licenses/LICENSE-2.0

#

# Unless required by applicable law or agreed to in writing, software

# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT

# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the

# License for the specific language governing permissions and limitations

# under the License.

from oslo.config import cfg

import taskflow.engines

from taskflow.patterns import linear_flow

from taskflow.utils import misc

from cinder import exception

from cinder import flow_utils

from cinder.openstack.common import log as logging

from cinder.openstack.common import timeutils

from cinder import policy

from cinder import quota

from cinder import units

from cinder import utils

from cinder.volume.flows import common

from cinder.volume import volume_types

LOG = logging.getLogger(__name__)

ACTION = 'volume:create'

CONF = cfg.CONF

GB = units.GiB

QUOTAS = quota.QUOTAS

# Only when the source is in one of these statuses can we attempt to create a
# volume from a source volume or a source snapshot; other statuses (with
# 'error' being the common example) cannot be used as a creation source.

SNAPSHOT_PROCEED_STATUS = ('available',)

SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)


class ExtractVolumeRequestTask(flow_utils.CinderTask):
    """Processes an API request's values into a validated set of values.

    This task's responsibility is to take in a set of inputs that will form
    a potential volume request, validate those values against a set of
    conditions and/or translate them into a valid set, and then return the
    validated/translated values for use by other tasks.

    Reversion strategy: N/A
    """

    # This task will produce the following outputs (said outputs can be
    # saved to durable storage in the future so that the flow can be
    # reconstructed elsewhere and continued).


    def __init__(self, image_service, az_check_functor=None, **kwargs):

        super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],

                                                       **kwargs)

        self.image_service = image_service

        self.az_check_functor = az_check_functor

        if not self.az_check_functor:

            self.az_check_functor = lambda az: True

    @staticmethod


    def _extract_snapshot(snapshot):

        """Extracts the snapshot id from the provided snapshot (if provided).

        This function validates the input snapshot dict and checks that the

        status of that snapshot is valid for creating a volume from.

        """

        snapshot_id = None

        if snapshot is not None:

            if snapshot['status'] not in SNAPSHOT_PROCEED_STATUS:

                msg = _("Originating snapshot status must be one"

                        " of %s values")

                msg = msg % (", ".join(SNAPSHOT_PROCEED_STATUS))

                # TODO(harlowja): what happens if the status changes after this

                # initial snapshot status check occurs??? Seems like someone

                # could delete the snapshot after this check passes but before

                # the volume is officially created?

                raise exception.InvalidSnapshot(reason=msg)

            snapshot_id = snapshot['id']

        return snapshot_id

    @staticmethod


    def _extract_source_volume(source_volume):

        """Extracts the volume id from the provided volume (if provided).

        This function validates the input source_volume dict and checks that

        the status of that source_volume is valid for creating a volume from.

        """

        source_volid = None

        if source_volume is not None:

            if source_volume['status'] not in SRC_VOL_PROCEED_STATUS:

                msg = _("Unable to create a volume from an originating source"

                        " volume when its status is not one of %s"

                        " values")

                msg = msg % (", ".join(SRC_VOL_PROCEED_STATUS))

                # TODO(harlowja): what happens if the status changes after this

                # initial volume status check occurs??? Seems like someone

                # could delete the volume after this check passes but before

                # the volume is officially created?

                raise exception.InvalidVolume(reason=msg)

            source_volid = source_volume['id']

        return source_volid

    @staticmethod


    def _extract_size(size, source_volume, snapshot):

        """Extracts and validates the volume size.

        This function will validate or when not provided fill in the provided

        size variable from the source_volume or snapshot and then does

        validation on the size that is found and returns said validated size.

        """

        def validate_snap_size(size):

            if snapshot and size < snapshot['volume_size']:

                msg = _("Volume size %(size)sGB cannot be smaller than"

                        " the snapshot size %(snap_size)sGB. "

                        "They must be >= original snapshot size.")

                msg = msg % {'size': size,

                             'snap_size': snapshot['volume_size']}

                raise exception.InvalidInput(reason=msg)

        def validate_source_size(size):

            if source_volume and size < source_volume['size']:

                msg = _("Volume size %(size)sGB cannot be smaller than "

                        "original volume size  %(source_size)sGB. "

                        "They must be >= original volume size.")

                msg = msg % {'size': size,

                             'source_size': source_volume['size']}

                raise exception.InvalidInput(reason=msg)

        def validate_int(size):

            if not isinstance(size, int) or size <= 0:

                msg = _("Volume size %(size)s must be an integer and"

                        " greater than 0") % {'size': size}

                raise exception.InvalidInput(reason=msg)

        # Figure out which validation functions we should be applying

        # on the size value that we extract.

        validator_functors = [validate_int]

        if source_volume:

            validator_functors.append(validate_source_size)

        elif snapshot:

            validator_functors.append(validate_snap_size)

        # If the size is not provided then try to provide it.

        if not size and source_volume:

            size = source_volume['size']

        elif not size and snapshot:

            size = snapshot['volume_size']

        size = utils.as_int(size)

        LOG.debug("Validating volume %(size)s using %(functors)s" %

                  {'size': size,

                   'functors': ", ".join([common.make_pretty_name(func)

                                          for func in validator_functors])})

        for func in validator_functors:

            func(size)

        return size
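    # Illustrative behaviour (hypothetical values, study note): requesting
    # size=5 while creating from a snapshot whose volume_size is 10 fails in
    # validate_snap_size with InvalidInput, whereas passing size=None simply
    # inherits the snapshot's 10 and then passes validate_int.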


    def _check_image_metadata(self, context, image_id, size):

        """Checks image existence and validates that the image metadata."""

        # Check image existence

        if not image_id:

            return

        # NOTE(harlowja): this should raise an error if the image does not

        # exist, this is expected as it signals that the image_id is missing.

        image_meta = self.image_service.show(context, image_id)

        # Check image size is not larger than volume size.

        image_size = utils.as_int(image_meta['size'], quiet=False)

        image_size_in_gb = (image_size + GB - 1) / GB

        if image_size_in_gb > size:

            msg = _('Size of specified image %(image_size)sGB'

                    ' is larger than volume size %(volume_size)sGB.')

            msg = msg % {'image_size': image_size_in_gb, 'volume_size': size}

            raise exception.InvalidInput(reason=msg)

        # Check image min_disk requirement is met for the particular volume

        min_disk = image_meta.get('min_disk', 0)

        if size < min_disk:

            msg = _('Volume size %(volume_size)sGB cannot be smaller'

                    ' than the image minDisk size %(min_disk)sGB.')

            msg = msg % {'volume_size': size, 'min_disk': min_disk}

            raise exception.InvalidInput(reason=msg)
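    # Worked example (illustrative numbers, study note): with GB = units.GiB
    # (1073741824 bytes), a 1610612736 byte (1.5 GiB) image gives
    # (1610612736 + 1073741824 - 1) / 1073741824 == 2 under Python 2 integer
    # division, i.e. the image size is rounded *up* to whole gigabytes before
    # being compared against the requested volume size.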

    @staticmethod


    def _check_metadata_properties(metadata=None):

        """Checks that the volume metadata properties are valid."""

        if not metadata:

            metadata = {}

        for (k, v) in metadata.iteritems():

            if len(k) == 0:

                msg = _("Metadata property key blank")

                LOG.warn(msg)

                raise exception.InvalidVolumeMetadata(reason=msg)

            if len(k) > 255:

                msg = _("Metadata property key %s greater than 255 "

                        "characters") % k

                LOG.warn(msg)

                raise exception.InvalidVolumeMetadataSize(reason=msg)

            if len(v) > 255:

                msg = _("Metadata property key %s value greater than"

                        " 255 characters") % k

                LOG.warn(msg)

                raise exception.InvalidVolumeMetadataSize(reason=msg)


    def _extract_availability_zone(self, availability_zone, snapshot,

                                   source_volume):

        """Extracts and returns a validated availability zone.

        This function will extract the availability zone (if not provided) from

        the snapshot or source_volume and then performs a set of validation

        checks on the provided or extracted availability zone and then returns

        the validated availability zone.

        """

        # Try to extract the availability zone from the corresponding snapshot

        # or source volume if either is valid so that we can be in the same

        # availability zone as the source.

        if availability_zone is None:

            if snapshot:

                try:

                    availability_zone = snapshot['volume']['availability_zone']

                except (TypeError, KeyError):

                    pass

            if source_volume and availability_zone is None:

                try:

                    availability_zone = source_volume['availability_zone']

                except (TypeError, KeyError):

                    pass

        if availability_zone is None:

            if CONF.default_availability_zone:

                availability_zone = CONF.default_availability_zone

            else:

                # For backwards compatibility use the storage_availability_zone

                availability_zone = CONF.storage_availability_zone

        if not self.az_check_functor(availability_zone):

            msg = _("Availability zone '%s' is invalid") % (availability_zone)

            LOG.warn(msg)

            raise exception.InvalidInput(reason=msg)

        # If the configuration only allows cloning to the same availability

        # zone then we need to enforce that.

        if CONF.cloned_volume_same_az:

            snap_az = None

            try:

                snap_az = snapshot['volume']['availability_zone']

            except (TypeError, KeyError):

                pass

            if snap_az and snap_az != availability_zone:

                msg = _("Volume must be in the same "

                        "availability zone as the snapshot")

                raise exception.InvalidInput(reason=msg)

            source_vol_az = None

            try:

                source_vol_az = source_volume['availability_zone']

            except (TypeError, KeyError):

                pass

            if source_vol_az and source_vol_az != availability_zone:

                msg = _("Volume must be in the same "

                        "availability zone as the source volume")

                raise exception.InvalidInput(reason=msg)

        return availability_zone
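    # Precedence recap (study note, as implemented above): an explicitly
    # requested zone wins; otherwise the snapshot's volume zone, then the
    # source volume's zone, then CONF.default_availability_zone, and finally
    # CONF.storage_availability_zone are tried in that order. For example,
    # cloning a volume that lives in zone "az-1" (hypothetical name) while
    # requesting "az-2" with CONF.cloned_volume_same_az enabled raises
    # InvalidInput.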


    def _get_encryption_key_id(self, key_manager, context, volume_type_id,

                               snapshot, source_volume, backup_source_volume):

        encryption_key_id = None

        if volume_types.is_encrypted(context, volume_type_id):

            if snapshot is not None:  # creating from snapshot

                encryption_key_id = snapshot['encryption_key_id']

            elif source_volume is not None:  # cloning volume

                encryption_key_id = source_volume['encryption_key_id']

            elif backup_source_volume is not None:  # creating from backup

                encryption_key_id = backup_source_volume['encryption_key_id']

            # NOTE(joel-coffman): References to the encryption key should *not*

            # be copied because the key is deleted when the volume is deleted.

            # Clone the existing key and associate a separate -- but

            # identical -- key with each volume.

            if encryption_key_id is not None:

                encryption_key_id = key_manager.copy_key(context,

                                                         encryption_key_id)

            else:

                encryption_key_id = key_manager.create_key(context)

        return encryption_key_id


    def _get_volume_type_id(self, volume_type, source_volume, snapshot,

                            backup_source_volume):

        volume_type_id = None

        if not volume_type and source_volume:

            volume_type_id = source_volume['volume_type_id']

        elif snapshot is not None:

            if volume_type:

                current_volume_type_id = volume_type.get('id')

                if (current_volume_type_id !=

                        snapshot['volume_type_id']):

                    msg = _("Volume type will be changed to "

                            "be the same as the source volume.")

                    LOG.warn(msg)

            volume_type_id = snapshot['volume_type_id']

        elif backup_source_volume is not None:

            volume_type_id = backup_source_volume['volume_type_id']

        else:

            volume_type_id = volume_type.get('id')

        return volume_type_id


    def execute(self, context, size, snapshot, image_id, source_volume,

                availability_zone, volume_type, metadata,

                key_manager, backup_source_volume):

        utils.check_exclusive_options(snapshot=snapshot,

                                      imageRef=image_id,

                                      source_volume=source_volume)

        policy.enforce_action(context, ACTION)

        # TODO(harlowja): what guarantee is there that the snapshot or source

        # volume will remain available after we do this initial verification??

        snapshot_id = self._extract_snapshot(snapshot)

        source_volid = self._extract_source_volume(source_volume)

        size = self._extract_size(size, source_volume, snapshot)

        self._check_image_metadata(context, image_id, size)

        availability_zone = self._extract_availability_zone(availability_zone,

                                                            snapshot,

                                                            source_volume)

        # TODO(joel-coffman): This special handling of snapshots to ensure that

        # their volume type matches the source volume is too convoluted. We

        # should copy encryption metadata from the encrypted volume type to the

        # volume upon creation and propagate that information to each snapshot.

        # This strategy avoid any dependency upon the encrypted volume type.

        if not volume_type and not source_volume and not snapshot:

            volume_type = volume_types.get_default_volume_type()

        volume_type_id = self._get_volume_type_id(volume_type,

                                                  source_volume, snapshot,

                                                  backup_source_volume)

        encryption_key_id = self._get_encryption_key_id(key_manager,

                                                        context,

                                                        volume_type_id,

                                                        snapshot,

                                                        source_volume,

                                                        backup_source_volume)

        specs = {}

        if volume_type_id:

            qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id)

            specs = qos_specs['qos_specs']

        if not specs:

            # to make sure we don't pass empty dict

            specs = None

        self._check_metadata_properties(metadata)

        return {

            'size': size,

            'snapshot_id': snapshot_id,

            'source_volid': source_volid,

            'availability_zone': availability_zone,

            'volume_type': volume_type,

            'volume_type_id': volume_type_id,

            'encryption_key_id': encryption_key_id,

            'qos_specs': specs,

        }
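    # Study note: the keys returned above are the values this task publishes
    # to the flow's storage; later tasks in get_flow() consume them through
    # their 'requires' lists (for example EntryCreateTask requires 'size',
    # 'snapshot_id', 'source_volid', 'availability_zone', 'volume_type_id'
    # and 'encryption_key_id', all of which are produced here).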


class EntryCreateTask(flow_utils.CinderTask):
    """Creates an entry for the given volume creation in the database.

    Reversion strategy: remove the volume_id created from the database.
    """


    def __init__(self, db):

        requires = ['availability_zone', 'description', 'metadata',

                    'name', 'reservations', 'size', 'snapshot_id',

                    'source_volid', 'volume_type_id', 'encryption_key_id']

        super(EntryCreateTask, self).__init__(addons=[ACTION],

                                              requires=requires)

        self.db = db

        self.provides.update(['volume_properties', 'volume_id', 'volume'])


    def execute(self, context, **kwargs):

        """Creates a database entry for the given inputs and returns details.

        Accesses the database and creates a new entry for the to be created

        volume using the given volume properties which are extracted from the

        input kwargs (and associated requirements this task needs). These

        requirements should be previously satisfied and validated by a

        pre-cursor task.

        """

        volume_properties = {

            'size': kwargs.pop('size'),

            'user_id': context.user_id,

            'project_id': context.project_id,

            'status': 'creating',

            'attach_status': 'detached',

            'encryption_key_id': kwargs.pop('encryption_key_id'),

            # Rename these to the internal name.

            'display_description': kwargs.pop('description'),

            'display_name': kwargs.pop('name'),

        }

        # Merge in the other required arguments which should provide the rest

        # of the volume property fields (if applicable).

        volume_properties.update(kwargs)

        volume = self.db.volume_create(context, volume_properties)

        return {

            'volume_id': volume['id'],

            'volume_properties': volume_properties,

            # NOTE(harlowja): it appears like further usage of this volume

            # result actually depend on it being a sqlalchemy object and not

            # just a plain dictionary so that's why we are storing this here.

            #

            # In the future where this task results can be serialized and

            # restored automatically for continued running we will need to

            # resolve the serialization & recreation of this object since raw

            # sqlalchemy objects can't be serialized.

            'volume': volume,

        }


    def revert(self, context, result, **kwargs):

        # We never produced a result and therefore can't destroy anything.

        if isinstance(result, misc.Failure):

            return

        if context.quota_committed:

            # Committed quota doesn't rollback as the volume has already been

            # created at this point, and the quota has already been absorbed.

            return

        vol_id = result['volume_id']

        try:

            self.db.volume_destroy(context.elevated(), vol_id)

        except exception.CinderException:

            # We are already reverting, therefore we should silence this

            # exception since a second exception being active will be bad.

            #

            # NOTE(harlowja): Being unable to destroy a volume is pretty

            # bad though!!

            LOG.exception(_("Failed destroying volume entry %s"), vol_id)


class QuotaReserveTask(flow_utils.CinderTask):
    """Reserves a single volume with the given size & the given volume type.

    Reversion strategy: rollback the quota reservation.

    Warning: if the process that is running this reserve and commit workflow
    fails (or is killed) before the quota is rolled back or committed, it does
    appear like the quota will never be rolled back. This makes software
    upgrades hard (in-flight operations will need to be stopped or allowed to
    complete before the upgrade can occur). *In the future* when taskflow has
    persistence built-in this should be easier to correct via an automated or
    manual process.
    """


    def __init__(self):

        super(QuotaReserveTask, self).__init__(addons=[ACTION])


    def execute(self, context, size, volume_type_id):

        try:

            reserve_opts = {'volumes': 1, 'gigabytes': size}

            QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)

            reservations = QUOTAS.reserve(context, **reserve_opts)

            return {

                'reservations': reservations,

            }

        except exception.OverQuota as e:

            overs = e.kwargs['overs']

            quotas = e.kwargs['quotas']

            usages = e.kwargs['usages']

            def _consumed(name):

                return (usages[name]['reserved'] + usages[name]['in_use'])

            def _is_over(name):

                for over in overs:

                    if name in over:

                        return True

                return False

            if _is_over('gigabytes'):

                msg = _("Quota exceeded for %(s_pid)s, tried to create "

                        "%(s_size)sG volume (%(d_consumed)dG "

                        "of %(d_quota)dG already consumed)")

                LOG.warn(msg % {'s_pid': context.project_id,

                                's_size': size,

                                'd_consumed': _consumed('gigabytes'),

                                'd_quota': quotas['gigabytes']})

                raise exception.VolumeSizeExceedsAvailableQuota(

                    requested=size,

                    consumed=_consumed('gigabytes'),

                    quota=quotas['gigabytes'])

            elif _is_over('volumes'):

                msg = _("Quota exceeded for %(s_pid)s, tried to create "

                        "volume (%(d_consumed)d volumes "

                        "already consumed)")

                LOG.warn(msg % {'s_pid': context.project_id,

                                'd_consumed': _consumed('volumes')})

                raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])

            else:

                # If nothing was reraised, ensure we reraise the initial error

                raise
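    # Example reservation (illustrative, study note): for a 10 GB volume of a
    # volume type named "gold" (hypothetical name), add_volume_type_opts is
    # expected to expand reserve_opts to something like
    #   {'volumes': 1, 'gigabytes': 10,
    #    'volumes_gold': 1, 'gigabytes_gold': 10}
    # so the global and per-type quotas are reserved together.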


    def revert(self, context, result, **kwargs):

        # We never produced a result and therefore can't destroy anything.

        if isinstance(result, misc.Failure):

            return

        if context.quota_committed:

            # The reservations have already been committed and can not be

            # rolled back at this point.

            return

        # We actually produced an output that we can revert so lets attempt

        # to use said output to rollback the reservation.

        reservations = result['reservations']

        try:

            QUOTAS.rollback(context, reservations)

        except exception.CinderException:

            # We are already reverting, therefore we should silence this

            # exception since a second exception being active will be bad.

            LOG.exception(_("Failed rolling back quota for"

                            " %s reservations"), reservations)


class QuotaCommitTask(flow_utils.CinderTask):
    """Commits the reservation.

    Reversion strategy: N/A (the rollback will be handled by the task that
    did the initial reservation; see QuotaReserveTask).

    Warning: if the process that is running this reserve and commit workflow
    fails (or is killed) before the quota is rolled back or committed, it does
    appear like the quota will never be rolled back. This makes software
    upgrades hard (in-flight operations will need to be stopped or allowed to
    complete before the upgrade can occur). *In the future* when taskflow has
    persistence built-in this should be easier to correct via an automated or
    manual process.
    """


    def __init__(self):

        super(QuotaCommitTask, self).__init__(addons=[ACTION])


    def execute(self, context, reservations, volume_properties):

        QUOTAS.commit(context, reservations)

        context.quota_committed = True

        return {'volume_properties': volume_properties}


    def revert(self, context, result, **kwargs):

        # We never produced a result and therefore can't destroy anything.

        if isinstance(result, misc.Failure):

            return

        volume = result['volume_properties']

        try:

            reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}

            QUOTAS.add_volume_type_opts(context,

                                        reserve_opts,

                                        volume['volume_type_id'])

            reservations = QUOTAS.reserve(context,

                                          project_id=context.project_id,

                                          **reserve_opts)

            if reservations:

                QUOTAS.commit(context, reservations,

                              project_id=context.project_id)

        except Exception:

            LOG.exception(_("Failed to update quota for deleting volume: %s"),

                          volume['id'])


class VolumeCastTask(flow_utils.CinderTask):
    """Performs a volume create cast to the scheduler or to the volume manager.

    This will signal a transition of the API workflow to another child and/or
    related workflow on another component.

    Reversion strategy: N/A
    """


    def __init__(self, scheduler_rpcapi, volume_rpcapi, db):

        requires = ['image_id', 'scheduler_hints', 'snapshot_id',

                    'source_volid', 'volume_id', 'volume_type',

                    'volume_properties']

        super(VolumeCastTask, self).__init__(addons=[ACTION],

                                             requires=requires)

        self.volume_rpcapi = volume_rpcapi

        self.scheduler_rpcapi = scheduler_rpcapi

        self.db = db


    def _cast_create_volume(self, context, request_spec, filter_properties):

        source_volid = request_spec['source_volid']

        volume_id = request_spec['volume_id']

        snapshot_id = request_spec['snapshot_id']

        image_id = request_spec['image_id']

        host = None

        if snapshot_id and CONF.snapshot_same_host:

            # NOTE(Rongze Zhu): A simple solution for bug 1008866.

            #

            # If snapshot_id is set, make the create volume call directly to
            # the volume host where the snapshot resides instead of passing it
            # through the scheduler, so the snapshot can be copied to the new
            # volume.

            snapshot_ref = self.db.snapshot_get(context, snapshot_id)

            source_volume_ref = self.db.volume_get(context,

                                                   snapshot_ref['volume_id'])

            host = source_volume_ref['host']

        elif source_volid:

            source_volume_ref = self.db.volume_get(context, source_volid)

            host = source_volume_ref['host']

        if not host:

            # Cast to the scheduler and let it handle whatever is needed

            # to select the target host for this volume.

            self.scheduler_rpcapi.create_volume(

                context,

                CONF.volume_topic,

                volume_id,

                snapshot_id=snapshot_id,

                image_id=image_id,

                request_spec=request_spec,

                filter_properties=filter_properties)

        else:

            # Bypass the scheduler and send the request directly to the volume

            # manager.

            now = timeutils.utcnow()

            values = {'host': host, 'scheduled_at': now}

            volume_ref = self.db.volume_update(context, volume_id, values)

            self.volume_rpcapi.create_volume(

                context,

                volume_ref,

                volume_ref['host'],

                request_spec,

                filter_properties,

                allow_reschedule=False,

                snapshot_id=snapshot_id,

                image_id=image_id,

                source_volid=source_volid)


    def execute(self, context, **kwargs):

        scheduler_hints = kwargs.pop('scheduler_hints', None)

        request_spec = kwargs.copy()

        filter_properties = {}

        if scheduler_hints:

            filter_properties['scheduler_hints'] = scheduler_hints

        self._cast_create_volume(context, request_spec, filter_properties)


    def revert(self, context, result, flow_failures, **kwargs):

        if isinstance(result, misc.Failure):

            return

        # Restore the source volume status and set the volume to error status.

        volume_id = kwargs['volume_id']

        common.restore_source_status(context, self.db, kwargs)

        common.error_out_volume(context, self.db, volume_id)

        LOG.error(_("Volume %s: create failed"), volume_id)

        exc_info = False

        if all(flow_failures[-1].exc_info):

            exc_info = flow_failures[-1].exc_info

        LOG.error(_('Unexpected build error:'), exc_info=exc_info)

def get_flow(scheduler_rpcapi, volume_rpcapi, db,

             image_service,

             az_check_functor,

             create_what):

    """Constructs and returns the api entrypoint flow.

    This flow will do the following:

    1. Inject keys & values for dependent tasks.

    2. Extracts and validates the input keys & values.

    3. Reserves the quota (reverts quota on any failures).

    4. Creates the database entry.

    5. Commits the quota.

    6. Casts to volume manager or scheduler for further processing.

    """

    flow_name = ACTION.replace(":", "_") + "_api"

    api_flow = linear_flow.Flow(flow_name)

    api_flow.add(ExtractVolumeRequestTask(

        image_service,

        az_check_functor,

        rebind={'size': 'raw_size',

                'availability_zone': 'raw_availability_zone',

                'volume_type': 'raw_volume_type'}))

    api_flow.add(QuotaReserveTask(),

                 EntryCreateTask(db),

                 QuotaCommitTask())

    # This will cast it out to either the scheduler or volume manager via

    # the rpc apis provided.

    api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db))

    # Now load (but do not run) the flow using the provided initial data.

    return taskflow.engines.load(api_flow, store=create_what)
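A minimal caller sketch follows (a study illustration, not part of the original module): the service objects and request values are assumed to be supplied by the API layer, and the store keys mirror the rebind mapping above ('raw_size', 'raw_availability_zone', 'raw_volume_type') plus the inputs the tasks require.

# Hypothetical usage sketch: scheduler_rpcapi, volume_rpcapi, db,
# image_service, key_manager and the request values are placeholders
# assumed to come from the calling API layer.
create_what = {
    'context': context,
    'raw_size': size,
    'name': name,
    'description': description,
    'snapshot': snapshot,
    'image_id': image_id,
    'source_volume': source_volume,
    'raw_availability_zone': availability_zone,
    'raw_volume_type': volume_type,
    'metadata': metadata,
    'scheduler_hints': scheduler_hints,
    'key_manager': key_manager,
    'backup_source_volume': backup_source_volume,
}
flow_engine = get_flow(scheduler_rpcapi, volume_rpcapi, db,
                       image_service,
                       az_check_functor=None,
                       create_what=create_what)
# Running the engine executes the tasks in order; on a failure each already
# completed task's revert() is invoked (quota rollback, database entry
# removal, error status, ...).
flow_engine.run()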
