| field | value | date |
|---|---|---|
| author | Jenkins <jenkins@review.openstack.org> | 2012-04-30 22:23:11 +0000 |
| committer | Gerrit Code Review <review@openstack.org> | 2012-04-30 22:23:11 +0000 |
| commit | dda036044e2ddb249aef8d483269bcbf97141abd | |
| tree | 86c4f07c5198265e06752b26a7120371307f9db0 | |
| parent | 2c72cb3bcee6b64fff6e3eaa738e4bc80828a830 | |
| parent | 58af96d3e01c7fbe993344374190d8afe1a1d0ff | |
Merge "Migrate block_device_mapping to use instance uuids."
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/api/ec2/cloud.py | 8 |
| -rw-r--r-- | nova/api/metadata/handler.py | 2 |
| -rw-r--r-- | nova/compute/api.py | 17 |
| -rw-r--r-- | nova/compute/manager.py | 50 |
| -rw-r--r-- | nova/db/api.py | 9 |
| -rw-r--r-- | nova/db/sqlalchemy/api.py | 14 |
| -rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py | 81 |
| -rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql | 97 |
| -rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql | 97 |
| -rw-r--r-- | nova/db/sqlalchemy/migration.py | 5 |
| -rw-r--r-- | nova/db/sqlalchemy/models.py | 10 |
| -rw-r--r-- | nova/tests/api/ec2/test_cloud.py | 32 |
| -rw-r--r-- | nova/tests/api/ec2/test_ec2_validate.py | 2 |
| -rw-r--r-- | nova/tests/test_compute.py | 35 |
| -rw-r--r-- | nova/tests/test_metadata.py | 33 |
15 files changed, 390 insertions, 102 deletions
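
The substance of the patch is a single key change: block-device-mapping rows are now keyed by the instance's uuid rather than its integer id, so every caller in the EC2 API, metadata handler, compute API/manager and the tests switches from `instance['id']` to `instance['uuid']` when calling the db layer. As a rough illustration of that caller-side pattern — this helper is a sketch, not code from the patch; it simply mirrors `_get_instance_volume_bdms` in nova/compute/manager.py:

```python
# Illustrative sketch only; `context` and `instance` stand in for the
# objects Nova normally passes around, and `db` is nova.db.api.
from nova import db


def volume_bdms_for_instance(context, instance):
    # Pre-patch, callers passed the integer primary key:
    #     db.block_device_mapping_get_all_by_instance(context, instance['id'])
    # Post-patch, the lookup is keyed on the uuid column instead:
    bdms = db.block_device_mapping_get_all_by_instance(context,
                                                       instance['uuid'])
    # Keep only mappings that actually reference a volume, as
    # _get_instance_volume_bdms does in the diff below.
    return [bdm for bdm in bdms if bdm['volume_id']]
```
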
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 4b2d1d686..8c6a1fdc3 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -999,7 +999,7 @@ class CloudController(object): def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) - self._format_instance_bdm(context, instance_id, + self._format_instance_bdm(context, instance['uuid'], tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): @@ -1099,13 +1099,13 @@ class CloudController(object): instances_set.append(i) return {'instancesSet': instances_set} - def _format_instance_bdm(self, context, instance_id, root_device_name, + def _format_instance_bdm(self, context, instance_uuid, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in db.block_device_mapping_get_all_by_instance(context, - instance_id): + instance_uuid): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue @@ -1221,7 +1221,7 @@ class CloudController(object): i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, i) - self._format_instance_bdm(context, instance_id, + self._format_instance_bdm(context, instance['uuid'], i['rootDeviceName'], i) host = instance['host'] services = db.service_get_all_by_host(context.elevated(), host) diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py index 1ad8a5413..9929e6e66 100644 --- a/nova/api/metadata/handler.py +++ b/nova/api/metadata/handler.py @@ -99,7 +99,7 @@ class MetadataRequestHandler(wsgi.Application): # 'ephemeralN', 'swap' and ebs for bdm in db.block_device_mapping_get_all_by_instance( - ctxt, instance_ref['id']): + ctxt, instance_ref['uuid']): if bdm['no_device']: continue diff --git a/nova/compute/api.py b/nova/compute/api.py index 307b5e456..542782c8b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -484,7 +484,7 @@ class API(BaseAPI): return size def _update_image_block_device_mapping(self, elevated_context, - instance_type, instance_id, + instance_type, instance_uuid, mappings): """tell vm driver to create ephemeral/swap device at boot time by updating BlockDeviceMapping @@ -507,7 +507,7 @@ class API(BaseAPI): continue values = { - 'instance_id': instance_id, + 'instance_uuid': instance_uuid, 'device_name': bdm['device'], 'virtual_name': virtual_name, 'volume_size': size} @@ -515,7 +515,7 @@ class API(BaseAPI): values) def _update_block_device_mapping(self, elevated_context, - instance_type, instance_id, + instance_type, instance_uuid, block_device_mapping): """tell vm driver to attach volume at boot time by updating BlockDeviceMapping @@ -524,7 +524,7 @@ class API(BaseAPI): for bdm in block_device_mapping: assert 'device_name' in bdm - values = {'instance_id': instance_id} + values = {'instance_uuid': instance_uuid} for key in ('device_name', 'delete_on_termination', 'virtual_name', 'snapshot_id', 'volume_id', 'volume_size', 'no_device'): @@ -587,12 +587,13 @@ class API(BaseAPI): # BlockDeviceMapping table self._update_image_block_device_mapping(elevated, instance_type, - instance_id, image['properties'].get('mappings', [])) - self._update_block_device_mapping(elevated, instance_type, instance_id, + instance_uuid, image['properties'].get('mappings', [])) + self._update_block_device_mapping(elevated, instance_type, + instance_uuid, 
image['properties'].get('block_device_mapping', [])) # override via command line option - self._update_block_device_mapping(elevated, instance_type, instance_id, - block_device_mapping) + self._update_block_device_mapping(elevated, instance_type, + instance_uuid, block_device_mapping) # Set sane defaults if not specified updates = {} diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0149b1116..71f7e817b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -355,7 +355,7 @@ class ComputeManager(manager.SchedulerDependentManager): swap = None ephemerals = [] for bdm in self.db.block_device_mapping_get_all_by_instance( - context, instance['id']): + context, instance['uuid']): LOG.debug(_('Setting up bdm %s'), bdm, instance=instance) if bdm['no_device']: @@ -617,13 +617,13 @@ class ComputeManager(manager.SchedulerDependentManager): instance=instance) self.network_api.deallocate_for_instance(context, instance) - def _get_instance_volume_bdms(self, context, instance_id): + def _get_instance_volume_bdms(self, context, instance_uuid): bdms = self.db.block_device_mapping_get_all_by_instance(context, - instance_id) + instance_uuid) return [bdm for bdm in bdms if bdm['volume_id']] - def _get_instance_volume_bdm(self, context, instance_id, volume_id): - bdms = self._get_instance_volume_bdms(context, instance_id) + def _get_instance_volume_bdm(self, context, instance_uuid, volume_id): + bdms = self._get_instance_volume_bdms(context, instance_uuid) for bdm in bdms: # NOTE(vish): Comparing as strings because the os_api doesn't # convert to integer and we may wish to support uuids @@ -631,8 +631,8 @@ class ComputeManager(manager.SchedulerDependentManager): if str(bdm['volume_id']) == str(volume_id): return bdm - def _get_instance_volume_block_device_info(self, context, instance_id): - bdms = self._get_instance_volume_bdms(context, instance_id) + def _get_instance_volume_block_device_info(self, context, instance_uuid): + bdms = self._get_instance_volume_bdms(context, instance_uuid) block_device_mapping = [] for bdm in bdms: cinfo = utils.loads(bdm['connection_info']) @@ -668,8 +668,6 @@ class ComputeManager(manager.SchedulerDependentManager): def _shutdown_instance(self, context, instance, action_str): """Shutdown an instance on this host.""" context = context.elevated() - instance_id = instance['id'] - instance_uuid = instance['uuid'] LOG.audit(_('%(action_str)s instance') % {'action_str': action_str}, context=context, instance=instance) @@ -681,9 +679,9 @@ class ComputeManager(manager.SchedulerDependentManager): self._deallocate_network(context, instance) # NOTE(vish) get bdms before destroying the instance - bdms = self._get_instance_volume_bdms(context, instance_id) + bdms = self._get_instance_volume_bdms(context, instance['uuid']) block_device_info = self._get_instance_volume_block_device_info( - context, instance_id) + context, instance['uuid']) self.driver.destroy(instance, self._legacy_nw_info(network_info), block_device_info) for bdm in bdms: @@ -702,11 +700,12 @@ class ComputeManager(manager.SchedulerDependentManager): self._notify_about_instance_usage(context, instance, "shutdown.end") - def _cleanup_volumes(self, context, instance_id): + def _cleanup_volumes(self, context, instance_uuid): bdms = self.db.block_device_mapping_get_all_by_instance(context, - instance_id) + instance_uuid) for bdm in bdms: - LOG.debug(_("terminating bdm %s") % bdm) + LOG.debug(_("terminating bdm %s") % bdm, + instance_uuid=instance_uuid) if bdm['volume_id'] and 
bdm['delete_on_termination']: volume = self.volume_api.get(context, bdm['volume_id']) self.volume_api.delete(context, volume) @@ -717,7 +716,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id = instance['id'] self._notify_about_instance_usage(context, instance, "delete.start") self._shutdown_instance(context, instance, 'Terminating') - self._cleanup_volumes(context, instance_id) + self._cleanup_volumes(context, instance['uuid']) instance = self._instance_update(context, instance_id, vm_state=vm_states.DELETED, @@ -1712,7 +1711,6 @@ class ComputeManager(manager.SchedulerDependentManager): volume = self.volume_api.get(context, volume_id) context = context.elevated() instance_ref = self.db.instance_get_by_uuid(context, instance_uuid) - instance_id = instance_ref['id'] LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'), locals(), context=context, instance=instance_ref) try: @@ -1741,9 +1739,9 @@ class ComputeManager(manager.SchedulerDependentManager): volume, connector) - self.volume_api.attach(context, volume, instance_id, mountpoint) + self.volume_api.attach(context, volume, instance_ref['id'], mountpoint) values = { - 'instance_id': instance_id, + 'instance_uuid': instance_ref['uuid'], 'connection_info': utils.dumps(connection_info), 'device_name': mountpoint, 'delete_on_termination': False, @@ -1778,15 +1776,14 @@ class ComputeManager(manager.SchedulerDependentManager): def detach_volume(self, context, instance_uuid, volume_id): """Detach a volume from an instance.""" instance_ref = self.db.instance_get_by_uuid(context, instance_uuid) - instance_id = instance_ref['id'] - bdm = self._get_instance_volume_bdm(context, instance_id, volume_id) + bdm = self._get_instance_volume_bdm(context, instance_uuid, volume_id) self._detach_volume(context, instance_ref, bdm) volume = self.volume_api.get(context, volume_id) connector = self.driver.get_volume_connector(instance_ref) self.volume_api.terminate_connection(context, volume, connector) self.volume_api.detach(context.elevated(), volume) self.db.block_device_mapping_destroy_by_instance_and_volume( - context, instance_id, volume_id) + context, instance_uuid, volume_id) return True @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1889,7 +1886,7 @@ class ComputeManager(manager.SchedulerDependentManager): # If any volume is mounted, prepare here. block_device_info = self._get_instance_volume_block_device_info( - context, instance_id) + context, instance_ref['uuid']) if not block_device_info['block_device_mapping']: LOG.info(_('Instance has no volume.'), instance=instance_ref) @@ -1959,7 +1956,7 @@ class ComputeManager(manager.SchedulerDependentManager): try: # Checking volume node is working correctly when any volumes # are attached to instances. - if self._get_instance_volume_bdms(context, instance_id): + if self._get_instance_volume_bdms(context, instance_ref['uuid']): rpc.call(context, FLAGS.volume_topic, {'method': 'check_for_export', @@ -2011,7 +2008,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance=instance_ref) # Detaching volumes. - for bdm in self._get_instance_volume_bdms(ctxt, instance_ref['id']): + for bdm in self._get_instance_volume_bdms(ctxt, instance_ref['uuid']): # NOTE(vish): We don't want to actually mark the volume # detached, or delete the bdm, just remove the # connection from this host. 
@@ -2147,7 +2144,8 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_api.setup_networks_on_host(context, instance_ref, self.host) - for bdm in self._get_instance_volume_bdms(context, instance_ref['id']): + for bdm in self._get_instance_volume_bdms(context, + instance_ref['uuid']): volume_id = bdm['volume_id'] volume = self.volume_api.get(context, volume_id) self.volume_api.update(context, volume, {'status': 'in-use'}) @@ -2478,7 +2476,7 @@ class ComputeManager(manager.SchedulerDependentManager): "DELETED but still present on host."), locals(), instance=instance) self._shutdown_instance(context, instance, 'Terminating') - self._cleanup_volumes(context, instance['id']) + self._cleanup_volumes(context, instance['uuid']) else: raise Exception(_("Unrecognized value '%(action)s'" " for FLAGS.running_deleted_" diff --git a/nova/db/api.py b/nova/db/api.py index 184c2ab55..5de921667 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1086,9 +1086,10 @@ def block_device_mapping_update_or_create(context, values): return IMPL.block_device_mapping_update_or_create(context, values) -def block_device_mapping_get_all_by_instance(context, instance_id): +def block_device_mapping_get_all_by_instance(context, instance_uuid): """Get all block device mapping belonging to a instance""" - return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) + return IMPL.block_device_mapping_get_all_by_instance(context, + instance_uuid) def block_device_mapping_destroy(context, bdm_id): @@ -1096,11 +1097,11 @@ def block_device_mapping_destroy(context, bdm_id): return IMPL.block_device_mapping_destroy(context, bdm_id) -def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, +def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, volume_id): """Destroy the block device mapping or raise if it does not exist.""" return IMPL.block_device_mapping_destroy_by_instance_and_volume( - context, instance_id, volume_id) + context, instance_uuid, volume_id) #################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2785b2b03..1d7509aef 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1316,7 +1316,7 @@ def instance_destroy(context, instance_id): 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.BlockDeviceMapping).\ - filter_by(instance_id=instance_id).\ + filter_by(instance_uuid=instance_ref['uuid']).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2726,7 +2726,7 @@ def block_device_mapping_update_or_create(context, values): session = get_session() with session.begin(): result = _block_device_mapping_get_query(context, session=session).\ - filter_by(instance_id=values['instance_id']).\ + filter_by(instance_uuid=values['instance_uuid']).\ filter_by(device_name=values['device_name']).\ first() if not result: @@ -2742,7 +2742,7 @@ def block_device_mapping_update_or_create(context, values): if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): session.query(models.BlockDeviceMapping).\ - filter_by(instance_id=values['instance_id']).\ + filter_by(instance_uuid=values['instance_uuid']).\ filter_by(virtual_name=virtual_name).\ filter(models.BlockDeviceMapping.device_name != values['device_name']).\ @@ -2752,9 +2752,9 @@ def block_device_mapping_update_or_create(context, values): @require_context -def block_device_mapping_get_all_by_instance(context, instance_id): +def 
block_device_mapping_get_all_by_instance(context, instance_uuid): return _block_device_mapping_get_query(context).\ - filter_by(instance_id=instance_id).\ + filter_by(instance_uuid=instance_uuid).\ all() @@ -2770,12 +2770,12 @@ def block_device_mapping_destroy(context, bdm_id): @require_context -def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, +def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, volume_id): session = get_session() with session.begin(): _block_device_mapping_get_query(context, session=session).\ - filter_by(instance_id=instance_id).\ + filter_by(instance_uuid=instance_uuid).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py new file mode 100644 index 000000000..524d04d3f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2012 Michael Still and Canonical Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import select, Column, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from migrate import ForeignKeyConstraint + +from nova import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + uuid_column = Column('instance_uuid', String(36)) + uuid_column.create(block_device_mapping) + + try: + block_device_mapping.update().values( + instance_uuid=select( + [instances.c.uuid], + instances.c.id == block_device_mapping.c.instance_id) + ).execute() + except Exception: + uuid_column.drop() + raise + + fkeys = list(block_device_mapping.c.instance_id.foreign_keys) + if fkeys: + try: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint( + columns=[block_device_mapping.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + block_device_mapping.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + id_column = Column('instance_id', Integer, ForeignKey('instances.id')) + id_column.create(block_device_mapping) + + try: + block_device_mapping.update().values( + instance_id=select( + [instances.c.id], + instances.c.uuid == block_device_mapping.c.instance_uuid) + ).execute() + except Exception: + id_column.drop() + raise + + block_device_mapping.c.instance_uuid.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql new file mode 100644 index 000000000..3699ce9ab --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql @@ -0,0 +1,97 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + NULL, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + instance_uuid + FROM block_device_mapping; + + UPDATE block_device_mapping_backup + SET instance_id= + (SELECT id + FROM instances + WHERE block_device_mapping_backup.instance_uuid = instances.uuid + ); + + DROP TABLE block_device_mapping; + + CREATE TABLE block_device_mapping ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination 
BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping_backup; + + DROP TABLE block_device_mapping_backup; + +COMMIT;
\ No newline at end of file diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql new file mode 100644 index 000000000..d75d2ffa2 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql @@ -0,0 +1,97 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + NULL + FROM block_device_mapping; + + UPDATE block_device_mapping_backup + SET instance_uuid= + (SELECT uuid + FROM instances + WHERE block_device_mapping_backup.instance_id = instances.id + ); + + DROP TABLE block_device_mapping; + + CREATE TABLE block_device_mapping ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid) + ); + + INSERT INTO block_device_mapping + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + instance_uuid + FROM block_device_mapping_backup; + + DROP TABLE block_device_mapping_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index 7338a6a80..452c78c19 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -23,12 +23,17 @@ import sys from nova.db.sqlalchemy.session import get_engine from nova import exception from nova import flags +from nova import log as logging + import sqlalchemy import migrate from migrate.versioning import util as migrate_util +LOG = logging.getLogger(__name__) + + @migrate_util.decorator def patched_with_engine(f, *a, **kw): url = a[0] diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index c79db1f04..1544629ff 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -482,12 +482,14 @@ class BlockDeviceMapping(BASE, NovaBase): __tablename__ = "block_device_mapping" id = Column(Integer, primary_key=True, autoincrement=True) - instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + 
instance_uuid = Column(Integer, ForeignKey('instances.uuid'), + nullable=False) instance = relationship(Instance, backref=backref('balock_device_mapping'), - foreign_keys=instance_id, - primaryjoin='and_(BlockDeviceMapping.instance_id==' - 'Instance.id,' + foreign_keys=instance_uuid, + primaryjoin='and_(BlockDeviceMapping.' + 'instance_uuid==' + 'Instance.uuid,' 'BlockDeviceMapping.deleted==' 'False)') device_name = Column(String(255), nullable=False) diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 1611871db..7b6fb34c2 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -945,42 +945,43 @@ class CloudTestCase(test.TestCase): 'root_device_name': '/dev/sdc1'}) instance_id = inst1['id'] + instance_uuid = inst1['uuid'] mappings0 = [ - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb1', 'snapshot_id': '1', 'volume_id': '2'}, - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb2', 'volume_id': '3', 'volume_size': 1}, - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb3', 'delete_on_termination': True, 'snapshot_id': '4', 'volume_id': '5'}, - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb4', 'delete_on_termination': False, 'snapshot_id': '6', 'volume_id': '7'}, - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb5', 'snapshot_id': '8', 'volume_id': '9', 'volume_size': 0}, - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb6', 'snapshot_id': '10', 'volume_id': '11', 'volume_size': 1}, - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb7', 'no_device': True}, - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb8', 'virtual_name': 'swap'}, - {'instance_id': instance_id, + {'instance_uuid': instance_uuid, 'device_name': '/dev/sdb9', 'virtual_name': 'ephemeral3'}] @@ -990,9 +991,9 @@ class CloudTestCase(test.TestCase): def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes): for vol in volumes: db.volume_destroy(self.context, vol['id']) - for id in (inst1['id'], inst2['id']): + for uuid in (inst1['uuid'], inst2['uuid']): for bdm in db.block_device_mapping_get_all_by_instance( - self.context, id): + self.context, uuid): db.block_device_mapping_destroy(self.context, bdm['id']) db.instance_destroy(self.context, inst2['id']) db.instance_destroy(self.context, inst1['id']) @@ -1043,8 +1044,8 @@ class CloudTestCase(test.TestCase): (inst1, inst2, volumes) = self._setUpBlockDeviceMapping() result = {} - self.cloud._format_instance_bdm(self.context, inst1['id'], '/dev/sdb1', - result) + self.cloud._format_instance_bdm(self.context, inst1['uuid'], + '/dev/sdb1', result) self.assertSubDictMatch( {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']}, result) @@ -1052,8 +1053,8 @@ class CloudTestCase(test.TestCase): self._expected_block_device_mapping0, result['blockDeviceMapping']) result = {} - self.cloud._format_instance_bdm(self.context, inst2['id'], '/dev/sdc1', - result) + self.cloud._format_instance_bdm(self.context, inst2['uuid'], + '/dev/sdc1', result) self.assertSubDictMatch( {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']}, result) @@ -2166,6 +2167,7 @@ class CloudTestCase(test.TestCase): def fake_get(ctxt, instance_id): return { 'id': 0, + 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85', 
'root_device_name': '/dev/sdh', 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], 'vm_state': vm_states.STOPPED, diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py index ec8f09387..3765c9425 100644 --- a/nova/tests/api/ec2/test_ec2_validate.py +++ b/nova/tests/api/ec2/test_ec2_validate.py @@ -124,7 +124,7 @@ class EC2ValidateTestCase(test.TestCase): instance_id='i-1234', device='/dev/vdc') - def test_describe_instance_ttribute(self): + def test_describe_instance_attribute(self): for ec2_id, e in self.ec2_id_exception_map: self.assertRaises(e, self.cloud.describe_instance_attribute, diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 0fae2d48f..ca159c891 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1403,37 +1403,38 @@ class ComputeTestCase(BaseTestCase): """Confirm exception when pre_live_migration fails.""" # creating instance testdata inst_ref = self._create_fake_instance({'host': 'dummy'}) - instance_id = inst_ref['id'] + c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host']) # creating volume testdata volume_id = 1 db.volume_create(c, {'id': volume_id}) - values = {'instance_id': instance_id, 'device_name': '/dev/vdc', - 'delete_on_termination': False, 'volume_id': volume_id} + values = {'instance_uuid': inst_ref['uuid'], 'device_name': '/dev/vdc', + 'delete_on_termination': False, 'volume_id': volume_id} db.block_device_mapping_create(c, values) # creating mocks self.mox.StubOutWithMock(rpc, 'call') - rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export", - "args": {'instance_id': instance_id}}) + rpc.call(c, FLAGS.volume_topic, + {"method": "check_for_export", + "args": {'instance_id': inst_ref['id']}}) self.mox.StubOutWithMock(self.compute.driver, 'get_instance_disk_info') self.compute.driver.get_instance_disk_info(inst_ref.name) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': instance_id, + "args": {'instance_id': inst_ref['id'], 'block_migration': True, 'disk': None} }).AndRaise(rpc.common.RemoteError('', '', '')) # mocks for rollback rpc.call(c, 'network', {'method': 'setup_networks_on_host', - 'args': {'instance_id': instance_id, + 'args': {'instance_id': inst_ref['id'], 'host': self.compute.host, 'teardown': False}}) rpc.call(c, topic, {"method": "remove_volume_connection", - "args": {'instance_id': instance_id, + "args": {'instance_id': inst_ref['id'], 'volume_id': volume_id}}) rpc.cast(c, topic, {"method": "rollback_live_migration_at_destination", "args": {'instance_id': inst_ref['id']}}) @@ -1442,11 +1443,11 @@ class ComputeTestCase(BaseTestCase): self.mox.ReplayAll() self.assertRaises(rpc_common.RemoteError, self.compute.live_migration, - c, instance_id, inst_ref['host'], True) + c, inst_ref['id'], inst_ref['host'], True) # cleanup - for bdms in db.block_device_mapping_get_all_by_instance(c, - instance_id): + for bdms in db.block_device_mapping_get_all_by_instance( + c, inst_ref['uuid']): db.block_device_mapping_destroy(c, bdms['id']) db.volume_destroy(c, volume_id) db.instance_destroy(c, inst_ref['id']) @@ -1676,7 +1677,7 @@ class ComputeTestCase(BaseTestCase): self.mox.StubOutWithMock(self.compute, "_cleanup_volumes") self.compute._cleanup_volumes(admin_context, - instance['id']).AndReturn(None) + instance['uuid']).AndReturn(None) self.mox.ReplayAll() self.compute._cleanup_running_deleted_instances(admin_context) @@ -3096,11 +3097,11 @@ class ComputeAPITestCase(BaseTestCase): 
'no_device': True}] self.compute_api._update_image_block_device_mapping( - self.context, instance_type, instance['id'], mappings) + self.context, instance_type, instance['uuid'], mappings) bdms = [self._parse_db_block_device_mapping(bdm_ref) for bdm_ref in db.block_device_mapping_get_all_by_instance( - self.context, instance['id'])] + self.context, instance['uuid'])] expected_result = [ {'virtual_name': 'swap', 'device_name': '/dev/sdb1', 'volume_size': swap_size}, @@ -3117,10 +3118,10 @@ class ComputeAPITestCase(BaseTestCase): self.compute_api._update_block_device_mapping( self.context, instance_types.get_default_instance_type(), - instance['id'], block_device_mapping) + instance['uuid'], block_device_mapping) bdms = [self._parse_db_block_device_mapping(bdm_ref) for bdm_ref in db.block_device_mapping_get_all_by_instance( - self.context, instance['id'])] + self.context, instance['uuid'])] expected_result = [ {'snapshot_id': 0x12345678, 'device_name': '/dev/sda1'}, @@ -3144,7 +3145,7 @@ class ComputeAPITestCase(BaseTestCase): self.assertDictListMatch(bdms, expected_result) for bdm in db.block_device_mapping_get_all_by_instance( - self.context, instance['id']): + self.context, instance['uuid']): db.block_device_mapping_destroy(self.context, bdm['id']) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.compute.terminate_instance(self.context, instance['uuid']) diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index 7bd3ebcdd..4a54d55ce 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -47,20 +47,21 @@ class MetadataTestCase(test.TestCase): def setUp(self): super(MetadataTestCase, self).setUp() self.instance = ({'id': 1, - 'name': 'fake', - 'project_id': 'test', - 'key_name': None, - 'host': 'test', - 'launch_index': 1, - 'instance_type': {'name': 'm1.tiny'}, - 'reservation_id': 'r-xxxxxxxx', - 'user_data': '', - 'image_ref': 7, - 'vcpus': 1, - 'fixed_ips': [], - 'root_device_name': '/dev/sda1', - 'info_cache': {'network_info': []}, - 'hostname': 'test'}) + 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2', + 'name': 'fake', + 'project_id': 'test', + 'key_name': None, + 'host': 'test', + 'launch_index': 1, + 'instance_type': {'name': 'm1.tiny'}, + 'reservation_id': 'r-xxxxxxxx', + 'user_data': '', + 'image_ref': 7, + 'vcpus': 1, + 'fixed_ips': [], + 'root_device_name': '/dev/sda1', + 'info_cache': {'network_info': []}, + 'hostname': 'test'}) def fake_get_floating_ips_by_fixed_address(self, context, fixed_ip): return ['1.2.3.4', '5.6.7.8'] @@ -144,11 +145,13 @@ class MetadataTestCase(test.TestCase): """Make sure that _get_instance_mapping works""" ctxt = None instance_ref0 = {'id': 0, + 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85', 'root_device_name': None} instance_ref1 = {'id': 0, + 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2', 'root_device_name': '/dev/sda1'} - def fake_bdm_get(ctxt, id): + def fake_bdm_get(ctxt, uuid): return [{'volume_id': 87654321, 'snapshot_id': None, 'no_device': None, |
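
On the schema side, migration 088 follows the usual sqlalchemy-migrate column-swap recipe: add the new `instance_uuid` column, backfill it from the `instances` table with a correlated select, then drop the old `instance_id` foreign key and column — with hand-written table-rebuild scripts for SQLite, which cannot drop columns in place. Below is a condensed sketch of that upgrade path; the error handling, the `ForeignKeyConstraint` drop, and the SQLite special case from the real file are omitted:

```python
# Condensed sketch of the 088 upgrade shown above (not the full file).
import migrate  # importing sqlalchemy-migrate patches Column with .create()/.drop()
from sqlalchemy import Column, MetaData, String, Table, select


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    bdm = Table('block_device_mapping', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    # 1. Add the new column.
    uuid_column = Column('instance_uuid', String(36))
    uuid_column.create(bdm)

    # 2. Backfill it from instances via a correlated select.
    bdm.update().values(
        instance_uuid=select([instances.c.uuid],
                             instances.c.id == bdm.c.instance_id)).execute()

    # 3. Drop the old integer foreign-key column.
    bdm.c.instance_id.drop()
```

The downgrade path in the migration is the mirror image: recreate `instance_id`, backfill it by joining `instances.uuid` back to `instances.id`, and drop `instance_uuid`.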
