| field | value | details |
|---|---|---|
| author | Lorin Hochstein <lorin@isi.edu> | 2011-06-22 23:34:56 -0400 |
| committer | Lorin Hochstein <lorin@isi.edu> | 2011-06-22 23:34:56 -0400 |
| commit | df44068a801aba373e8896bba235f2abca4e4c8a | (patch) |
| tree | 942f208195d995b678a151986ad3d25932f1c2b1 | /nova/db |
| parent | 6afcabac7442aa2e3944a3fef3d3452c189c1901 | (diff) |
| parent | 6d6720e9b7e52461238ece684c9acc7183673bb8 | (diff) |
| download | nova-df44068a801aba373e8896bba235f2abca4e4c8a.tar.gz, nova-df44068a801aba373e8896bba235f2abca4e4c8a.tar.xz, nova-df44068a801aba373e8896bba235f2abca4e4c8a.zip | |
Merged from trunk
Diffstat (limited to 'nova/db')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/db/api.py | 70 |
| -rw-r--r-- | nova/db/sqlalchemy/api.py | 211 |
| -rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py | 87 |
| -rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py | 43 |
| -rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py | 73 |
| -rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/027_add_instance_type_extra_specs.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py) | 0 |
| -rw-r--r-- | nova/db/sqlalchemy/models.py | 55 |
7 files changed, 512 insertions, 27 deletions
```diff
diff --git a/nova/db/api.py b/nova/db/api.py
index a1a434d3d..0c9f45db6 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -414,6 +414,16 @@ def instance_destroy(context, instance_id):
     return IMPL.instance_destroy(context, instance_id)
 
 
+def instance_stop(context, instance_id):
+    """Stop the instance or raise if it does not exist."""
+    return IMPL.instance_stop(context, instance_id)
+
+
+def instance_get_by_uuid(context, uuid):
+    """Get an instance or raise if it does not exist."""
+    return IMPL.instance_get_by_uuid(context, uuid)
+
+
 def instance_get(context, instance_id):
     """Get an instance or raise if it does not exist."""
     return IMPL.instance_get(context, instance_id)
@@ -920,6 +930,36 @@ def snapshot_update(context, snapshot_id, values):
 ####################
 
 
+def block_device_mapping_create(context, values):
+    """Create an entry of block device mapping"""
+    return IMPL.block_device_mapping_create(context, values)
+
+
+def block_device_mapping_update(context, bdm_id, values):
+    """Create an entry of block device mapping"""
+    return IMPL.block_device_mapping_update(context, bdm_id, values)
+
+
+def block_device_mapping_get_all_by_instance(context, instance_id):
+    """Get all block device mapping belonging to a instance"""
+    return IMPL.block_device_mapping_get_all_by_instance(context, instance_id)
+
+
+def block_device_mapping_destroy(context, bdm_id):
+    """Destroy the block device mapping."""
+    return IMPL.block_device_mapping_destroy(context, bdm_id)
+
+
+def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
+                                                        volume_id):
+    """Destroy the block device mapping or raise if it does not exist."""
+    return IMPL.block_device_mapping_destroy_by_instance_and_volume(
+        context, instance_id, volume_id)
+
+
+####################
+
+
 def security_group_get_all(context):
     """Get all security groups."""
     return IMPL.security_group_get_all(context)
@@ -1248,6 +1288,36 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
     """Create or update instance metadata."""
     IMPL.instance_metadata_update_or_create(context, instance_id, metadata)
 
+
+####################
+
+
+def agent_build_get_all(context):
+    """Get all agent builds."""
+    return IMPL.agent_build_get_all(context)
+
+
+def agent_build_destroy(context, agent_update_id):
+    """Destroy agent build entry."""
+    IMPL.agent_build_destroy(context, agent_update_id)
+
+
+def agent_build_update(context, agent_build_id, values):
+    """Update agent build entry."""
+    IMPL.agent_build_update(context, agent_build_id, values)
+
+
+def agent_build_create(context, values):
+    """Create a new agent build entry."""
+    return IMPL.agent_build_create(context, values)
+
+
+def agent_build_get_by_triple(context, hypervisor, os, architecture):
+    """Get agent build by hypervisor/OS/architecture triple."""
+    return IMPL.agent_build_get_by_triple(context, hypervisor, os,
+                                          architecture)
+
+
 ####################
```
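The api.py half of the change only adds thin pass-throughs: every new function delegates to `IMPL`, the lazily selected backend module (the SQLAlchemy backend in nova/db/sqlalchemy/api.py). A minimal sketch of that dispatch pattern, assuming a simple lazy-import helper rather than nova's actual pluggable-backend utility:

```python
# Sketch of the pluggable-backend dispatch used by nova/db/api.py.
# LazyBackend is an illustrative stand-in, not nova's real helper.
import importlib


class LazyBackend(object):
    """Resolve the backend module on first attribute access."""

    def __init__(self, module_path):
        self._module_path = module_path
        self._backend = None

    def __getattr__(self, name):
        if self._backend is None:
            self._backend = importlib.import_module(self._module_path)
        return getattr(self._backend, name)


IMPL = LazyBackend('nova.db.sqlalchemy.api')


def block_device_mapping_create(context, values):
    """Create an entry of block device mapping."""
    return IMPL.block_device_mapping_create(context, values)
```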
""" - +import traceback import warnings from nova import db @@ -797,6 +797,8 @@ def instance_create(context, values): values['metadata'] = _metadata_refs(values.get('metadata')) instance_ref = models.Instance() + instance_ref['uuid'] = str(utils.gen_uuid()) + instance_ref.update(values) session = get_session() @@ -840,37 +842,65 @@ def instance_destroy(context, instance_id): @require_context +def instance_stop(context, instance_id): + session = get_session() + with session.begin(): + from nova.compute import power_state + session.query(models.Instance).\ + filter_by(id=instance_id).\ + update({'host': None, + 'state': power_state.SHUTOFF, + 'state_description': 'stopped', + 'updated_at': literal_column('updated_at')}) + session.query(models.SecurityGroupInstanceAssociation).\ + filter_by(instance_id=instance_id).\ + update({'updated_at': literal_column('updated_at')}) + session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + update({'updated_at': literal_column('updated_at')}) + + +@require_context +def instance_get_by_uuid(context, uuid, session=None): + partial = _build_instance_get(context, session=session) + result = partial.filter_by(uuid=uuid) + result = result.first() + if not result: + # FIXME(sirp): it would be nice if InstanceNotFound would accept a + # uuid parameter as well + raise exception.InstanceNotFound(instance_id=uuid) + return result + + +@require_context def instance_get(context, instance_id, session=None): + partial = _build_instance_get(context, session=session) + result = partial.filter_by(id=instance_id) + result = result.first() + if not result: + raise exception.InstanceNotFound(instance_id=instance_id) + return result + + +@require_context +def _build_instance_get(context, session=None): if not session: session = get_session() - result = None + + partial = session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('security_groups.rules')).\ + options(joinedload('volumes')).\ + options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ + options(joinedload('instance_type')) if is_admin_context(context): - result = session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload_all('security_groups.rules')).\ - options(joinedload('volumes')).\ - options(joinedload_all('fixed_ip.network')).\ - options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ - filter_by(id=instance_id).\ - filter_by(deleted=can_read_deleted(context)).\ - first() + partial = partial.filter_by(deleted=can_read_deleted(context)) elif is_user_context(context): - result = session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload_all('security_groups.rules')).\ - options(joinedload('volumes')).\ - options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ - filter_by(project_id=context.project_id).\ - filter_by(id=instance_id).\ - filter_by(deleted=False).\ - first() - if not result: - raise exception.InstanceNotFound(instance_id=instance_id) - - return result + partial = partial.filter_by(project_id=context.project_id).\ + filter_by(deleted=False) + return partial @require_admin_context @@ -1883,6 +1913,66 @@ def snapshot_update(context, snapshot_id, values): @require_context +def block_device_mapping_create(context, values): + bdm_ref = models.BlockDeviceMapping() + bdm_ref.update(values) + + session = get_session() + with session.begin(): + bdm_ref.save(session=session) 
```diff
@@ -1883,6 +1913,66 @@ def snapshot_update(context, snapshot_id, values):
 
 
 @require_context
+def block_device_mapping_create(context, values):
+    bdm_ref = models.BlockDeviceMapping()
+    bdm_ref.update(values)
+
+    session = get_session()
+    with session.begin():
+        bdm_ref.save(session=session)
+
+
+@require_context
+def block_device_mapping_update(context, bdm_id, values):
+    session = get_session()
+    with session.begin():
+        session.query(models.BlockDeviceMapping).\
+                filter_by(id=bdm_id).\
+                filter_by(deleted=False).\
+                update(values)
+
+
+@require_context
+def block_device_mapping_get_all_by_instance(context, instance_id):
+    session = get_session()
+    result = session.query(models.BlockDeviceMapping).\
+             filter_by(instance_id=instance_id).\
+             filter_by(deleted=False).\
+             all()
+    if not result:
+        return []
+    return result
+
+
+@require_context
+def block_device_mapping_destroy(context, bdm_id):
+    session = get_session()
+    with session.begin():
+        session.query(models.BlockDeviceMapping).\
+                filter_by(id=bdm_id).\
+                update({'deleted': True,
+                        'deleted_at': utils.utcnow(),
+                        'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
+                                                        volume_id):
+    session = get_session()
+    with session.begin():
+        session.query(models.BlockDeviceMapping).\
+                filter_by(instance_id=instance_id).\
+                filter_by(volume_id=volume_id).\
+                filter_by(deleted=False).\
+                update({'deleted': True,
+                        'deleted_at': utils.utcnow(),
+                        'updated_at': literal_column('updated_at')})
+
+
+###################
+
+
+@require_context
 def security_group_get_all(context):
     session = get_session()
     return session.query(models.SecurityGroup).\
```
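The block_device_mapping helpers above follow nova's soft-delete convention: destroy marks `deleted=True` and stamps `deleted_at`, while every reader filters on `deleted=False`. A minimal standalone sketch of that convention (the table below is an illustrative stand-in, not nova's real schema):

```python
# Minimal soft-delete sketch; the bdm table here is a stand-in for
# nova's block_device_mapping, not its real schema.
import datetime

from sqlalchemy import (Boolean, Column, DateTime, Integer, MetaData, String,
                        Table, create_engine)

meta = MetaData()
bdm = Table('block_device_mapping', meta,
            Column('id', Integer, primary_key=True),
            Column('instance_id', Integer, nullable=False),
            Column('device_name', String(255), nullable=False),
            Column('deleted', Boolean, default=False),
            Column('deleted_at', DateTime, nullable=True))

engine = create_engine('sqlite://')
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(bdm.insert().values(instance_id=1, device_name='/dev/vdb'))
    # "destroy" = mark deleted, keep the row around.
    conn.execute(bdm.update()
                 .where(bdm.c.instance_id == 1)
                 .values(deleted=True, deleted_at=datetime.datetime.utcnow()))
    # Readers always filter the flag, mirroring filter_by(deleted=False).
    live = conn.execute(bdm.select().where(bdm.c.deleted == False)).fetchall()
    print(live)  # -> []
```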
```diff
@@ -2630,7 +2720,17 @@ def zone_get_all(context):
 
 
 ####################
 
+
+def require_instance_exists(func):
+    def new_func(context, instance_id, *args, **kwargs):
+        db.api.instance_get(context, instance_id)
+        return func(context, instance_id, *args, **kwargs)
+    new_func.__name__ = func.__name__
+    return new_func
+
+
 @require_context
+@require_instance_exists
 def instance_metadata_get(context, instance_id):
     session = get_session()
 
@@ -2646,6 +2746,7 @@ def instance_metadata_get(context, instance_id):
 
 
 @require_context
+@require_instance_exists
 def instance_metadata_delete(context, instance_id, key):
     session = get_session()
     session.query(models.InstanceMetadata).\
@@ -2658,6 +2759,7 @@ def instance_metadata_delete(context, instance_id, key):
 
 
 @require_context
+@require_instance_exists
 def instance_metadata_delete_all(context, instance_id):
     session = get_session()
     session.query(models.InstanceMetadata).\
@@ -2669,6 +2771,7 @@ def instance_metadata_delete_all(context, instance_id):
 
 
 @require_context
+@require_instance_exists
 def instance_metadata_get_item(context, instance_id, key):
     session = get_session()
 
@@ -2685,6 +2788,7 @@ def instance_metadata_get_item(context, instance_id, key):
 
 
 @require_context
+@require_instance_exists
 def instance_metadata_update_or_create(context, instance_id, metadata):
     session = get_session()
 
@@ -2704,6 +2808,61 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
     return metadata
 
+
+####################
+
+
+@require_admin_context
+def agent_build_create(context, values):
+    agent_build_ref = models.AgentBuild()
+    agent_build_ref.update(values)
+    agent_build_ref.save()
+    return agent_build_ref
+
+
+@require_admin_context
+def agent_build_get_by_triple(context, hypervisor, os, architecture,
+                              session=None):
+    if not session:
+        session = get_session()
+    return session.query(models.AgentBuild).\
+                   filter_by(hypervisor=hypervisor).\
+                   filter_by(os=os).\
+                   filter_by(architecture=architecture).\
+                   filter_by(deleted=False).\
+                   first()
+
+
+@require_admin_context
+def agent_build_get_all(context):
+    session = get_session()
+    return session.query(models.AgentBuild).\
+                   filter_by(deleted=False).\
+                   all()
+
+
+@require_admin_context
+def agent_build_destroy(context, agent_build_id):
+    session = get_session()
+    with session.begin():
+        session.query(models.AgentBuild).\
+                filter_by(id=agent_build_id).\
+                update({'deleted': 1,
+                        'deleted_at': datetime.datetime.utcnow(),
+                        'updated_at': literal_column('updated_at')})
+
+
+@require_admin_context
+def agent_build_update(context, agent_build_id, values):
+    session = get_session()
+    with session.begin():
+        agent_build_ref = session.query(models.AgentBuild).\
+                          filter_by(id=agent_build_id). \
+                          first()
+        agent_build_ref.update(values)
+        agent_build_ref.save(session=session)
+
+
 ####################
```
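`require_instance_exists` above wraps each `instance_metadata_*` call with an up-front `instance_get()`, so a bad instance id raises InstanceNotFound instead of quietly returning nothing; note the committed version copies `__name__` by hand rather than using functools.wraps. A generic sketch of the same decorator idea with an illustrative in-memory lookup:

```python
# Generic sketch of a "require the parent row to exist" decorator.
# get_instance and _FAKE_DB are placeholders, not nova code.
import functools


class InstanceNotFound(Exception):
    pass


_FAKE_DB = {1: {'name': 'vm-1'}}


def get_instance(context, instance_id):
    try:
        return _FAKE_DB[instance_id]
    except KeyError:
        raise InstanceNotFound(instance_id)


def require_instance_exists(func):
    @functools.wraps(func)  # preserves __name__, __doc__, etc.
    def wrapper(context, instance_id, *args, **kwargs):
        get_instance(context, instance_id)   # raises if missing
        return func(context, instance_id, *args, **kwargs)
    return wrapper


@require_instance_exists
def instance_metadata_get(context, instance_id):
    return _FAKE_DB[instance_id].get('metadata', {})


print(instance_metadata_get(None, 1))   # {}
# instance_metadata_get(None, 42)       # would raise InstanceNotFound
```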
```diff
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py
new file mode 100644
index 000000000..6e9b806cb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py
@@ -0,0 +1,87 @@
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Isaku Yamahata
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table, Column
+from sqlalchemy import DateTime, Boolean, Integer, String
+from sqlalchemy import ForeignKey
+from nova import log as logging
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+volumes = Table('volumes', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+snapshots = Table('snapshots', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+
+block_device_mapping = Table('block_device_mapping', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, autoincrement=True),
+        Column('instance_id',
+               Integer(),
+               ForeignKey('instances.id'),
+               nullable=False),
+        Column('device_name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               nullable=False),
+        Column('delete_on_termination',
+               Boolean(create_constraint=True, name=None),
+               default=False),
+        Column('virtual_name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               nullable=True),
+        Column('snapshot_id',
+               Integer(),
+               ForeignKey('snapshots.id'),
+               nullable=True),
+        Column('volume_id', Integer(), ForeignKey('volumes.id'),
+               nullable=True),
+        Column('volume_size', Integer(), nullable=True),
+        Column('no_device',
+               Boolean(create_constraint=True, name=None),
+               nullable=True),
+        )
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+    try:
+        block_device_mapping.create()
+    except Exception:
+        logging.info(repr(block_device_mapping))
+        logging.exception('Exception while creating table')
+        meta.drop_all(tables=[block_device_mapping])
+        raise
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    block_device_mapping.drop()
```
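Migration 024 above declares throwaway `instances`/`volumes`/`snapshots` Table stubs only so its ForeignKey targets resolve, and cleans up after itself if the create fails. A reduced, self-contained sketch of that shape (illustrative table names, plain SQLAlchemy rather than the sqlalchemy-migrate runner):

```python
# Reduced sketch of the 024 migration's shape: stub parent table for the
# ForeignKey, create the new table, roll back the partial create on error.
# Table names are illustrative, not nova's schema.
from sqlalchemy import (Column, ForeignKey, Integer, MetaData, String, Table,
                        create_engine)

meta = MetaData()

# Stub: just enough of the parent table for the ForeignKey to resolve.
instances = Table('instances', meta,
                  Column('id', Integer, primary_key=True, nullable=False))

mappings = Table('example_mappings', meta,
                 Column('id', Integer, primary_key=True, autoincrement=True),
                 Column('instance_id', Integer, ForeignKey('instances.id'),
                        nullable=False),
                 Column('device_name', String(255), nullable=False))


def upgrade(migrate_engine):
    # A real migrate script binds meta to migrate_engine; here we pass the
    # engine explicitly instead.
    try:
        mappings.create(bind=migrate_engine)
    except Exception:
        # Leave the schema as we found it, then re-raise.
        meta.drop_all(bind=migrate_engine, tables=[mappings])
        raise


def downgrade(migrate_engine):
    mappings.drop(bind=migrate_engine)


if __name__ == '__main__':
    engine = create_engine('sqlite://')
    instances.create(bind=engine)   # pretend the parent table already exists
    upgrade(engine)
    downgrade(engine)
```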
```diff
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py
new file mode 100644
index 000000000..27f30d536
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py
@@ -0,0 +1,43 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+from nova import utils
+
+
+meta = MetaData()
+
+instances = Table("instances", meta,
+        Column("id", Integer(), primary_key=True, nullable=False))
+uuid_column = Column("uuid", String(36))
+
+
+def upgrade(migrate_engine):
+    meta.bind = migrate_engine
+    instances.create_column(uuid_column)
+
+    rows = migrate_engine.execute(instances.select())
+    for row in rows:
+        instance_uuid = str(utils.gen_uuid())
+        migrate_engine.execute(instances.update()\
+                .where(instances.c.id == row[0])\
+                .values(uuid=instance_uuid))
+
+
+def downgrade(migrate_engine):
+    meta.bind = migrate_engine
+    instances.drop_column(uuid_column)
```

```diff
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
new file mode 100644
index 000000000..640e96138
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
@@ -0,0 +1,73 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+from nova import log as logging
+
+meta = MetaData()
+
+#
+# New Tables
+#
+builds = Table('agent_builds', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('hypervisor',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('os',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('architecture',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('version',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('md5hash',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+#
+# New Column
+#
+
+architecture = Column('architecture', String(length=255))
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+    for table in (builds, ):
+        try:
+            table.create()
+        except Exception:
+            logging.info(repr(table))
+
+    instances = Table('instances', meta, autoload=True,
+                      autoload_with=migrate_engine)
+
+    # Add columns to existing tables
+    instances.create_column(architecture)
```

```diff
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/027_add_instance_type_extra_specs.py
index f26ad6d2c..f26ad6d2c 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/027_add_instance_type_extra_specs.py
```
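Migration 025 above adds the `uuid` column and then backfills every existing instance row with a freshly generated UUID, one UPDATE per row. The same add-column-then-backfill pattern as a standalone sketch, using plain SQL in place of migrate's `create_column` helper:

```python
# Standalone sketch of 025's "add column, then backfill row by row" pattern,
# using ALTER TABLE/UPDATE instead of sqlalchemy-migrate helpers.
import uuid

from sqlalchemy import create_engine, text

engine = create_engine('sqlite://')

with engine.begin() as conn:
    conn.execute(text("CREATE TABLE instances (id INTEGER PRIMARY KEY)"))
    conn.execute(text("INSERT INTO instances (id) VALUES (1), (2)"))

    # upgrade(): add the new column, then give every existing row a value.
    conn.execute(text("ALTER TABLE instances ADD COLUMN uuid VARCHAR(36)"))
    rows = conn.execute(text("SELECT id FROM instances")).fetchall()
    for (instance_id,) in rows:
        conn.execute(text("UPDATE instances SET uuid = :u WHERE id = :i"),
                     {"u": str(uuid.uuid4()), "i": instance_id})

    print(conn.execute(text("SELECT id, uuid FROM instances")).fetchall())
```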
```diff
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 96f85b4e5..4390f1be3 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -232,7 +232,9 @@ class Instance(BASE, NovaBase):
     locked = Column(Boolean)
 
     os_type = Column(String(255))
+    architecture = Column(String(255))
     vm_mode = Column(String(255))
+    uuid = Column(String(36))
 
     # TODO(vish): see Ewan's email about state improvements, probably
     # should be in a driver base class or some such
@@ -357,6 +359,45 @@ class Snapshot(BASE, NovaBase):
     display_description = Column(String(255))
 
 
+class BlockDeviceMapping(BASE, NovaBase):
+    """Represents block device mapping that is defined by EC2"""
+    __tablename__ = "block_device_mapping"
+    id = Column(Integer, primary_key=True, autoincrement=True)
+
+    instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+    instance = relationship(Instance,
+                            backref=backref('balock_device_mapping'),
+                            foreign_keys=instance_id,
+                            primaryjoin='and_(BlockDeviceMapping.instance_id=='
+                                        'Instance.id,'
+                                        'BlockDeviceMapping.deleted=='
+                                        'False)')
+    device_name = Column(String(255), nullable=False)
+
+    # default=False for compatibility of the existing code.
+    # With EC2 API,
+    # default True for ami specified device.
+    # default False for created with other timing.
+    delete_on_termination = Column(Boolean, default=False)
+
+    # for ephemeral device
+    virtual_name = Column(String(255), nullable=True)
+
+    # for snapshot or volume
+    snapshot_id = Column(Integer, ForeignKey('snapshots.id'), nullable=True)
+    # outer join
+    snapshot = relationship(Snapshot,
+                            foreign_keys=snapshot_id)
+
+    volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True)
+    volume = relationship(Volume,
+                          foreign_keys=volume_id)
+    volume_size = Column(Integer, nullable=True)
+
+    # for no device to suppress devices.
+    no_device = Column(Boolean, nullable=True)
+
+
 class ExportDevice(BASE, NovaBase):
     """Represates a shelf and blade that a volume can be exported on."""
     __tablename__ = 'export_devices'
@@ -688,6 +729,18 @@ class Zone(BASE, NovaBase):
     password = Column(String(255))
 
 
+class AgentBuild(BASE, NovaBase):
+    """Represents an agent build."""
+    __tablename__ = 'agent_builds'
+    id = Column(Integer, primary_key=True)
+    hypervisor = Column(String(255))
+    os = Column(String(255))
+    architecture = Column(String(255))
+    version = Column(String(255))
+    url = Column(String(255))
+    md5hash = Column(String(255))
+
+
 def register_models():
     """Register Models and create metadata.
 
@@ -701,7 +754,7 @@ def register_models():
               Network, SecurityGroup, SecurityGroupIngressRule,
               SecurityGroupInstanceAssociation, AuthToken, User,
               Project, Certificate, ConsolePool, Console, Zone,
-              InstanceMetadata, InstanceTypeExtraSpecs, Migration)
+              AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
     engine = create_engine(FLAGS.sql_connection, echo=False)
     for model in models:
         model.metadata.create_all(engine)
```
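In models.py above, `BlockDeviceMapping.instance` is declared with an explicit primaryjoin that also tests `BlockDeviceMapping.deleted == False`, so soft-deleted mappings never show up through the relationship (the committed backref name is spelled `balock_device_mapping`). A simplified, self-contained sketch of a relationship filtered that way; these models are illustrative, not nova's:

```python
# Sketch of a relationship whose primaryjoin also filters a soft-delete flag,
# as BlockDeviceMapping.instance does; models are simplified stand-ins.
from sqlalchemy import (Boolean, Column, ForeignKey, Integer, String,
                        create_engine)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship, sessionmaker

Base = declarative_base()


class Instance(Base):
    __tablename__ = 'instances'
    id = Column(Integer, primary_key=True)


class BlockDeviceMapping(Base):
    __tablename__ = 'block_device_mapping'
    id = Column(Integer, primary_key=True)
    instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
    device_name = Column(String(255), nullable=False)
    deleted = Column(Boolean, default=False)
    instance = relationship(
        Instance,
        backref=backref('block_device_mapping'),
        primaryjoin='and_(BlockDeviceMapping.instance_id == Instance.id, '
                    'BlockDeviceMapping.deleted == False)')


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
inst = Instance(id=1)
session.add_all([inst,
                 BlockDeviceMapping(instance_id=1, device_name='/dev/vda'),
                 BlockDeviceMapping(instance_id=1, device_name='/dev/vdb',
                                    deleted=True)])
session.commit()

# Only the non-deleted mapping is visible through the relationship.
print([bdm.device_name for bdm in inst.block_device_mapping])  # ['/dev/vda']
```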
