| field | value | date |
|---|---|---|
| author | Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp> | 2011-01-21 20:04:02 +0900 |
| committer | Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp> | 2011-01-21 20:04:02 +0900 |
| commit | d55e281efef06dbbcfec9ef4aad4ed0bac9a9368 (patch) | |
| tree | 4ae24944e609ae20092e8e6a2219b6da963de4c4 /nova | |
| parent | 3294d3f98cb78b169656711c73547e1cf0527432 (diff) | |
| parent | 14edbd55e667b16b8d46c0230b11ccd964f5742f (diff) | |
Merged with rev597
Diffstat (limited to 'nova')
43 files changed, 1613 insertions(+), 256 deletions(-)
diff --git a/nova/api/direct.py b/nova/api/direct.py
index 81b3ae202..208b6d086 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -142,9 +142,15 @@ class Reflection(object):
             if argspec[2]:
                 args_out.insert(0, ('**%s' % argspec[2],))
 
+            if f.__doc__:
+                short_doc = f.__doc__.split('\n')[0]
+                doc = f.__doc__
+            else:
+                short_doc = doc = _('not available')
+
             methods['/%s/%s' % (route, k)] = {
-                    'short_doc': f.__doc__.split('\n')[0],
-                    'doc': f.__doc__,
+                    'short_doc': short_doc,
+                    'doc': doc,
                     'name': k,
                     'args': list(reversed(args_out))}
 
@@ -196,6 +202,8 @@ class ServiceWrapper(wsgi.Controller):
         # TODO(termie): do some basic normalization on methods
         method = getattr(self.service_handle, action)
 
+        # NOTE(vish): make sure we have no unicode keys for py2.6.
+        params = dict([(str(k), v) for (k, v) in params.iteritems()])
         result = method(context, **params)
         if type(result) is dict or type(result) is list:
             return self._serialize(result, req)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 57d41ed67..f63ec9085 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -59,7 +59,7 @@ def _gen_key(context, user_id, key_name):
     # creation before creating key_pair
     try:
         db.key_pair_get(context, user_id, key_name)
-        raise exception.Duplicate("The key_pair %s already exists"
+        raise exception.Duplicate(_("The key_pair %s already exists")
                                   % key_name)
     except exception.NotFound:
         pass
@@ -133,7 +133,7 @@ class CloudController(object):
         return result
 
     def _get_availability_zone_by_host(self, context, host):
-        services = db.service_get_all_by_host(context, host)
+        services = db.service_get_all_by_host(context.elevated(), host)
         if len(services) > 0:
             return services[0]['availability_zone']
         return 'unknown zone'
@@ -729,7 +729,7 @@ class CloudController(object):
             ec2_id = None
             if (floating_ip_ref['fixed_ip']
                 and floating_ip_ref['fixed_ip']['instance']):
-                instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
+                instance_id = floating_ip_ref['fixed_ip']['instance']['id']
                 ec2_id = id_to_ec2_id(instance_id)
             address_rv = {'public_ip': address,
                           'instance_id': ec2_id}
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index 0eb6fe588..d8dad8edd 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -119,8 +119,8 @@ class DbDriver(object):
             for member_uid in member_uids:
                 member = db.user_get(context.get_admin_context(), member_uid)
                 if not member:
-                    raise exception.NotFound("Project can't be created "
-                                             "because user %s doesn't exist"
+                    raise exception.NotFound(_("Project can't be created "
+                                               "because user %s doesn't exist")
                                              % member_uid)
                 members.add(member)
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index bc53e0ec6..a6915ce03 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -146,7 +146,7 @@ class LdapDriver(object):
     def create_user(self, name, access_key, secret_key, is_admin):
         """Create a user"""
         if self.__user_exists(name):
-            raise exception.Duplicate("LDAP user %s already exists" % name)
+            raise exception.Duplicate(_("LDAP user %s already exists") % name)
         if FLAGS.ldap_user_modify_only:
             if self.__ldap_user_exists(name):
                 # Retrieve user by name
@@ -310,7 +310,7 @@ class LdapDriver(object):
     def delete_user(self, uid):
         """Delete a user"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s doesn't exist" % uid)
+            raise exception.NotFound(_("User %s doesn't exist") % uid)
         self.__remove_from_all(uid)
         if FLAGS.ldap_user_modify_only:
             # Delete attributes
@@ -432,15 +432,15 @@ class LdapDriver(object):
                        description, member_uids=None):
         """Create a group"""
         if self.__group_exists(group_dn):
-            raise exception.Duplicate("Group can't be created because "
-                                      "group %s already exists" % name)
+            raise exception.Duplicate(_("Group can't be created because "
+                                        "group %s already exists") % name)
         members = []
         if member_uids is not None:
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
-                    raise exception.NotFound("Group can't be created "
-                                             "because user %s doesn't exist" %
-                                             member_uid)
+                    raise exception.NotFound(_("Group can't be created "
+                                               "because user %s doesn't exist")
+                                             % member_uid)
                 members.append(self.__uid_to_dn(member_uid))
         dn = self.__uid_to_dn(uid)
         if not dn in members:
@@ -455,8 +455,8 @@ class LdapDriver(object):
     def __is_in_group(self, uid, group_dn):
         """Check if user is in group"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be searched in group "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be searched in group "
+                                       "because the user doesn't exist") % uid)
         if not self.__group_exists(group_dn):
             return False
         res = self.__find_object(group_dn,
@@ -467,10 +467,10 @@ class LdapDriver(object):
     def __add_to_group(self, uid, group_dn):
         """Add user to group"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be added to the group "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be added to the group "
+                                       "because the user doesn't exist") % uid)
         if not self.__group_exists(group_dn):
-            raise exception.NotFound("The group at dn %s doesn't exist" %
+            raise exception.NotFound(_("The group at dn %s doesn't exist") %
                                      group_dn)
         if self.__is_in_group(uid, group_dn):
             raise exception.Duplicate(_("User %s is already a member of "
@@ -481,15 +481,15 @@ class LdapDriver(object):
     def __remove_from_group(self, uid, group_dn):
         """Remove user from group"""
         if not self.__group_exists(group_dn):
-            raise exception.NotFound("The group at dn %s doesn't exist" %
-                                     group_dn)
+            raise exception.NotFound(_("The group at dn %s doesn't exist")
+                                     % group_dn)
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be removed from the "
-                                     "group because the user doesn't exist" %
-                                     uid)
+            raise exception.NotFound(_("User %s can't be removed from the "
+                                       "group because the user doesn't exist")
+                                     % uid)
         if not self.__is_in_group(uid, group_dn):
-            raise exception.NotFound("User %s is not a member of the group" %
-                                     uid)
+            raise exception.NotFound(_("User %s is not a member of the group")
+                                     % uid)
         # NOTE(vish): remove user from group and any sub_groups
         sub_dns = self.__find_group_dns_with_member(group_dn, uid)
         for sub_dn in sub_dns:
@@ -509,8 +509,9 @@ class LdapDriver(object):
     def __remove_from_all(self, uid):
         """Remove user from all roles and projects"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be removed from all "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be removed from all "
+                                       "because the user doesn't exist")
+                                     % uid)
         role_dns = self.__find_group_dns_with_member(
                 FLAGS.role_project_subtree, uid)
         for role_dn in role_dns:
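The nova/api/direct.py hunk above does two things: it guards against handlers with no docstring, and it coerces parameter names to `str` before keyword expansion. The latter matters because Python 2.6 rejects unicode keys in a `**` call. A minimal sketch of the failure mode (the handler here is hypothetical, not part of the commit):

```python
def describe(context, name):
    return 'described %s' % name

params = {u'name': u'vm-1'}     # JSON decoding hands back unicode keys
# describe('ctx', **params)    # py2.6: TypeError: keywords must be strings

# The fix applied in ServiceWrapper: coerce the keys to str first.
params = dict([(str(k), v) for (k, v) in params.iteritems()])
print describe('ctx', **params)  # works on 2.6 and 2.7
```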
diff --git a/nova/compute/api.py b/nova/compute/api.py
index a6b99c1cb..6a3fe08b6 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -246,13 +246,16 @@ class API(base.Base):
         # ..then we distill the security groups to which they belong..
         security_groups = set()
         for rule in security_group_rules:
-            security_groups.add(rule['parent_group_id'])
+            security_group = self.db.security_group_get(
+                                                    context,
+                                                    rule['parent_group_id'])
+            security_groups.add(security_group)
 
         # ..then we find the instances that are members of these groups..
         instances = set()
         for security_group in security_groups:
             for instance in security_group['instances']:
-                instances.add(instance['id'])
+                instances.add(instance)
 
         # ...then we find the hosts where they live...
         hosts = set()
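Most of the remaining hunks wrap user-facing strings in `_()` so they can be translated; the rule they follow is translate the template first, then `%`-interpolate. The test suite installs a no-op `_` (see nova/tests/__init__.py further down); real entry points bind gettext, roughly like this sketch:

```python
import gettext

gettext.install('nova', unicode=1)   # puts _() into __builtin__

host = 'node-3'
# Translate the format string, then substitute -- never the reverse:
print _("Host %s is not alive") % host
```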
diff --git a/nova/console/manager.py b/nova/console/manager.py
index c55ca8e8f..5697e7cb1 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -67,7 +67,7 @@ class ConsoleProxyManager(manager.Manager):
                                                       pool['id'],
                                                       instance_id)
         except exception.NotFound:
-            logging.debug("Adding console")
+            logging.debug(_("Adding console"))
             if not password:
                 password = self.driver.generate_password()
             if not port:
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 2a76223da..ee66dac46 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -96,7 +96,7 @@ class XVPConsoleProxy(object):
         return os.urandom(length * 2).encode('base64')[:length]
 
     def _rebuild_xvp_conf(self, context):
-        logging.debug("Rebuilding xvp conf")
+        logging.debug(_("Rebuilding xvp conf"))
         pools = [pool for pool in
                  db.console_pool_get_all_by_host_type(context, self.host,
                                                       self.console_type)
@@ -113,12 +113,12 @@ class XVPConsoleProxy(object):
         self._xvp_restart()
 
     def _write_conf(self, config):
-        logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf)
+        logging.debug(_('Re-wrote %s') % FLAGS.console_xvp_conf)
         with open(FLAGS.console_xvp_conf, 'w') as cfile:
             cfile.write(config)
 
     def _xvp_stop(self):
-        logging.debug("Stopping xvp")
+        logging.debug(_("Stopping xvp"))
         pid = self._xvp_pid()
         if not pid:
             return
@@ -131,19 +131,19 @@ class XVPConsoleProxy(object):
     def _xvp_start(self):
         if self._xvp_check_running():
             return
-        logging.debug("Starting xvp")
+        logging.debug(_("Starting xvp"))
        try:
             utils.execute('xvp -p %s -c %s -l %s' %
                           (FLAGS.console_xvp_pid,
                            FLAGS.console_xvp_conf,
                            FLAGS.console_xvp_log))
         except exception.ProcessExecutionError, err:
-            logging.error("Error starting xvp: %s" % err)
+            logging.error(_("Error starting xvp: %s") % err)
 
     def _xvp_restart(self):
-        logging.debug("Restarting xvp")
+        logging.debug(_("Restarting xvp"))
         if not self._xvp_check_running():
-            logging.debug("xvp not running...")
+            logging.debug(_("xvp not running..."))
             self._xvp_start()
         else:
             pid = self._xvp_pid()
diff --git a/nova/db/migration.py b/nova/db/migration.py
new file mode 100644
index 000000000..e54b90cd8
--- /dev/null
+++ b/nova/db/migration.py
@@ -0,0 +1,38 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Database setup and migration commands."""
+
+from nova import flags
+from nova import utils
+
+FLAGS = flags.FLAGS
+flags.DECLARE('db_backend', 'nova.db.api')
+
+
+IMPL = utils.LazyPluggable(FLAGS['db_backend'],
+                           sqlalchemy='nova.db.sqlalchemy.migration')
+
+
+def db_sync(version=None):
+    """Migrate the database to `version` or the most recent version."""
+    return IMPL.db_sync(version=version)
+
+
+def db_version():
+    """Display the current database version."""
+    return IMPL.db_version()
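The new nova/db/migration.py gives the rest of the tree a two-call surface, and `utils.LazyPluggable` defers the sqlalchemy import until first use, so flags are parsed before any engine is created (the problem the import-time retry loop removed in the next file used to paper over). Typical use, exactly as the test-suite `setup()` later in this diff does:

```python
from nova.db import migration

migration.db_sync()            # upgrade to the newest migration
print migration.db_version()   # report the current schema version
```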
+"""Database setup and migration commands.""" + +from nova import flags +from nova import utils + +FLAGS = flags.FLAGS +flags.DECLARE('db_backend', 'nova.db.api') + + +IMPL = utils.LazyPluggable(FLAGS['db_backend'], + sqlalchemy='nova.db.sqlalchemy.migration') + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 501373942..747015af5 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -15,31 +15,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -""" -SQLAlchemy database backend -""" -import time - -from sqlalchemy.exc import OperationalError - -from nova import flags -from nova import log as logging -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.db.sqlalchemy') - - -for i in xrange(FLAGS.sql_max_retries): - if i > 0: - time.sleep(FLAGS.sql_retry_interval) - - try: - models.register_models() - break - except OperationalError: - LOG.exception(_("Data store %s is unreachable." - " Trying again in %d seconds."), - FLAGS.sql_connection, FLAGS.sql_retry_interval) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b63b84bed..7b965f672 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -777,7 +777,7 @@ def instance_get_by_id(context, instance_id): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -785,6 +785,7 @@ def instance_get_by_id(context, instance_id): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ diff --git a/nova/db/sqlalchemy/migrate_repo/README b/nova/db/sqlalchemy/migrate_repo/README new file mode 100644 index 000000000..6218f8cac --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. 
diff --git a/nova/db/sqlalchemy/migrate_repo/__init__.py b/nova/db/sqlalchemy/migrate_repo/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/__init__.py
diff --git a/nova/db/sqlalchemy/migrate_repo/manage.py b/nova/db/sqlalchemy/migrate_repo/manage.py
new file mode 100644
index 000000000..09e340f44
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/manage.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+from migrate.versioning.shell import main
+if __name__ == '__main__':
+    main(debug='False', repository='.')
diff --git a/nova/db/sqlalchemy/migrate_repo/migrate.cfg b/nova/db/sqlalchemy/migrate_repo/migrate.cfg
new file mode 100644
index 000000000..2c75fb763
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/migrate.cfg
@@ -0,0 +1,20 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=nova
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
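manage.py above is the stock sqlalchemy-migrate shell with the repository pre-bound to this directory. The same operations are reachable from Python through the API that the new nova/db/sqlalchemy/migration.py (further down) builds on; a hedged sketch, with the URL and repository path as placeholder values:

```python
from migrate.versioning import api as versioning_api

url = 'sqlite:///nova.sqlite'               # placeholder connection string
repo = 'nova/db/sqlalchemy/migrate_repo'    # this repository

versioning_api.version_control(url, repo)   # stamp the db as version 0
print versioning_api.db_version(url, repo)  # -> 0
versioning_api.upgrade(url, repo, None)     # run 001, 002, ... to latest
```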
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
new file mode 100644
index 000000000..a312a7190
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
@@ -0,0 +1,547 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+## Table code mostly autogenerated by genmodel.py
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+auth_tokens = Table('auth_tokens', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('token_hash',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        Column('user_id', Integer()),
+        Column('server_manageent_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('storage_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('cdn_management_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+export_devices = Table('export_devices', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('shelf_id', Integer()),
+        Column('blade_id', Integer()),
+        Column('volume_id',
+               Integer(),
+               ForeignKey('volumes.id'),
+               nullable=True),
+        )
+
+
+fixed_ips = Table('fixed_ips', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('address',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('network_id',
+               Integer(),
+               ForeignKey('networks.id'),
+               nullable=True),
+        Column('instance_id',
+               Integer(),
+               ForeignKey('instances.id'),
+               nullable=True),
+        Column('allocated', Boolean(create_constraint=True, name=None)),
+        Column('leased', Boolean(create_constraint=True, name=None)),
+        Column('reserved', Boolean(create_constraint=True, name=None)),
+        )
+
+
+floating_ips = Table('floating_ips', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('address',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('fixed_ip_id',
+               Integer(),
+               ForeignKey('fixed_ips.id'),
+               nullable=True),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('host',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+instances = Table('instances', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('internal_id', Integer()),
+        Column('admin_pass',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('user_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('image_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('kernel_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('ramdisk_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('launch_index', Integer()),
+        Column('key_name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('key_data',
+               Text(length=None, convert_unicode=False, assert_unicode=None,
+                    unicode_error=None, _warn_on_bytestring=False)),
+        Column('state', Integer()),
+        Column('state_description',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('memory_mb', Integer()),
+        Column('vcpus', Integer()),
+        Column('local_gb', Integer()),
+        Column('hostname',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('host',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('instance_type',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('user_data',
+               Text(length=None, convert_unicode=False, assert_unicode=None,
+                    unicode_error=None, _warn_on_bytestring=False)),
+        Column('reservation_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('mac_address',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('scheduled_at', DateTime(timezone=False)),
+        Column('launched_at', DateTime(timezone=False)),
+        Column('terminated_at', DateTime(timezone=False)),
+        Column('display_name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('display_description',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+iscsi_targets = Table('iscsi_targets', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('target_num', Integer()),
+        Column('host',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('volume_id',
+               Integer(),
+               ForeignKey('volumes.id'),
+               nullable=True),
+        )
+
+
+key_pairs = Table('key_pairs', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('user_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('fingerprint',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('public_key',
+               Text(length=None, convert_unicode=False, assert_unicode=None,
+                    unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+networks = Table('networks', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('injected', Boolean(create_constraint=True, name=None)),
+        Column('cidr',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('netmask',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('bridge',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('gateway',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('broadcast',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('dns',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('vlan', Integer()),
+        Column('vpn_public_address',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('vpn_public_port', Integer()),
+        Column('vpn_private_address',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('dhcp_start',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('host',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+projects = Table('projects', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        Column('name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('description',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('project_manager',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               ForeignKey('users.id')),
+        )
+
+
+quotas = Table('quotas', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('instances', Integer()),
+        Column('cores', Integer()),
+        Column('volumes', Integer()),
+        Column('gigabytes', Integer()),
+        Column('floating_ips', Integer()),
+        )
+
+
+security_groups = Table('security_groups', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('description',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('user_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+security_group_inst_assoc = Table('security_group_instance_association', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('security_group_id',
+               Integer(),
+               ForeignKey('security_groups.id')),
+        Column('instance_id', Integer(), ForeignKey('instances.id')),
+        )
+
+
+security_group_rules = Table('security_group_rules', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('parent_group_id',
+               Integer(),
+               ForeignKey('security_groups.id')),
+        Column('protocol',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('from_port', Integer()),
+        Column('to_port', Integer()),
+        Column('cidr',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('group_id',
+               Integer(),
+               ForeignKey('security_groups.id')),
+        )
+
+
+services = Table('services', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('host',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('binary',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('topic',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('report_count', Integer(), nullable=False),
+        Column('disabled', Boolean(create_constraint=True, name=None)),
+        )
+
+
+users = Table('users', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        Column('name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('access_key',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('secret_key',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('is_admin', Boolean(create_constraint=True, name=None)),
+        )
+
+
+user_project_association = Table('user_project_association', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('user_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               ForeignKey('users.id'),
+               primary_key=True,
+               nullable=False),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               ForeignKey('projects.id'),
+               primary_key=True,
+               nullable=False),
+        )
+
+
+user_project_role_association = Table('user_project_role_association', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('user_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        Column('role',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        ForeignKeyConstraint(['user_id',
+                              'project_id'],
+                             ['user_project_association.user_id',
+                              'user_project_association.project_id']),
+        )
+
+
+user_role_association = Table('user_role_association', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('user_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               ForeignKey('users.id'),
+               primary_key=True,
+               nullable=False),
+        Column('role',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        )
+
+
+volumes = Table('volumes', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('ec2_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('user_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('host',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('size', Integer()),
+        Column('availability_zone',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('instance_id',
+               Integer(),
+               ForeignKey('instances.id'),
+               nullable=True),
+        Column('mountpoint',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('attach_time',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('status',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('attach_status',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('scheduled_at', DateTime(timezone=False)),
+        Column('launched_at', DateTime(timezone=False)),
+        Column('terminated_at', DateTime(timezone=False)),
+        Column('display_name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('display_description',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+
+    for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
+                  instances, iscsi_targets, key_pairs, networks,
+                  projects, quotas, security_groups, security_group_inst_assoc,
+                  security_group_rules, services, users,
+                  user_project_association, user_project_role_association,
+                  user_role_association, volumes):
+        try:
+            table.create()
+        except Exception:
+            logging.info(repr(table))
+            logging.exception('Exception while creating table')
+            raise
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
+                  instances, iscsi_targets, key_pairs, networks,
+                  projects, quotas, security_groups, security_group_inst_assoc,
+                  security_group_rules, services, users,
+                  user_project_association, user_project_role_association,
+                  user_role_association, volumes):
+        table.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
new file mode 100644
index 000000000..bd3a3e6f8
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
@@ -0,0 +1,209 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+
+services = Table('services', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+
+networks = Table('networks', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+
+#
+# New Tables
+#
+certificates = Table('certificates', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('user_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('project_id',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('file_name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+consoles = Table('consoles', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('instance_name',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('instance_id', Integer()),
+        Column('password',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('port', Integer(), nullable=True),
+        Column('pool_id',
+               Integer(),
+               ForeignKey('console_pools.id')),
+        )
+
+
+console_pools = Table('console_pools', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('address',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('username',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('password',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('console_type',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('public_hostname',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('host',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('compute_host',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+instance_actions = Table('instance_actions', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('instance_id',
+               Integer(),
+               ForeignKey('instances.id')),
+        Column('action',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('error',
+               Text(length=None, convert_unicode=False, assert_unicode=None,
+                    unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+#
+# Tables to alter
+#
+auth_tokens = Table('auth_tokens', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('token_hash',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        Column('user_id', Integer()),
+        Column('server_manageent_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('storage_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('cdn_management_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+instances_availability_zone = Column(
+        'availability_zone',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False))
+
+
+instances_locked = Column('locked',
+                          Boolean(create_constraint=True, name=None))
+
+
+networks_cidr_v6 = Column(
+        'cidr_v6',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False))
+
+networks_ra_server = Column(
+        'ra_server',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False))
+
+
+services_availability_zone = Column(
+        'availability_zone',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+    for table in (certificates, consoles, console_pools, instance_actions):
+        try:
+            table.create()
+        except Exception:
+            logging.info(repr(table))
+            logging.exception('Exception while creating table')
+            raise
+
+    auth_tokens.c.user_id.alter(type=String(length=255,
+                                            convert_unicode=False,
+                                            assert_unicode=None,
+                                            unicode_error=None,
+                                            _warn_on_bytestring=False))
+
+    instances.create_column(instances_availability_zone)
+    instances.create_column(instances_locked)
+    networks.create_column(networks_cidr_v6)
+    networks.create_column(networks_ra_server)
+    services.create_column(services_availability_zone)
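002_bexar leans on sqlalchemy-migrate's schema-change helpers: `from migrate import *` patches `create_column()` onto Table and `alter()` onto Column in this era of the library. A minimal standalone sketch under those assumptions (the `widgets` table is hypothetical, only for illustration):

```python
from sqlalchemy import Column, Integer, MetaData, String, Table
from migrate import *   # adds Table.create_column() / Column.alter()

meta = MetaData()
widgets = Table('widgets', meta,
        Column('id', Integer(), primary_key=True, nullable=False))


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    # add a brand-new column to an existing table...
    widgets.create_column(Column('note', String(length=255)))
    # ...or retype a column in place, as done for auth_tokens.user_id above
    widgets.c.note.alter(type=String(length=512))
```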
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/__init__.py b/nova/db/sqlalchemy/migrate_repo/versions/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/__init__.py
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
new file mode 100644
index 000000000..33d14827b
--- /dev/null
+++ b/nova/db/sqlalchemy/migration.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import flags
+
+import sqlalchemy
+from migrate.versioning import api as versioning_api
+from migrate.versioning import exceptions as versioning_exceptions
+
+FLAGS = flags.FLAGS
+
+
+def db_sync(version=None):
+    db_version()
+    repo_path = _find_migrate_repo()
+    return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version)
+
+
+def db_version():
+    repo_path = _find_migrate_repo()
+    try:
+        return versioning_api.db_version(FLAGS.sql_connection, repo_path)
+    except versioning_exceptions.DatabaseNotControlledError:
+        # If we aren't version controlled we may already have the database
+        # in the state from before we started version control, check for that
+        # and set up version_control appropriately
+        meta = sqlalchemy.MetaData()
+        engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
+        meta.reflect(bind=engine)
+        try:
+            for table in ('auth_tokens', 'export_devices', 'fixed_ips',
+                          'floating_ips', 'instances', 'iscsi_targets',
+                          'key_pairs', 'networks', 'projects', 'quotas',
+                          'security_group_rules',
+                          'security_group_instance_association', 'services',
+                          'users', 'user_project_association',
+                          'user_project_role_association', 'volumes'):
+                assert table in meta.tables
+            return db_version_control(1)
+        except AssertionError:
+            return db_version_control(0)
+
+
+def db_version_control(version=None):
+    repo_path = _find_migrate_repo()
+    versioning_api.version_control(FLAGS.sql_connection, repo_path, version)
+    return version
+
+
+def _find_migrate_repo():
+    """Get the path for the migrate repository."""
+    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                        'migrate_repo')
+    assert os.path.exists(path)
+    return path
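db_version() above cannot assume the migrate_version table exists, since austin-era deployments predate version control: it reflects the live schema and stamps version 1 if the old tables are found, 0 otherwise. The probe in isolation (the connection URL is a stand-in):

```python
import sqlalchemy

engine = sqlalchemy.create_engine('sqlite:///nova.sqlite', echo=False)
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)        # load whatever tables really exist

# A pre-versioning nova database already carries the austin tables.
version = 1 if 'instances' in meta.tables else 0
```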
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index bf5e48b04..c54ebe3ba 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -100,51 +100,6 @@ class NovaBase(object):
         return local.iteritems()
 
 
-# TODO(vish): Store images in the database instead of file system
-#class Image(BASE, NovaBase):
-#    """Represents an image in the datastore"""
-#    __tablename__ = 'images'
-#    id = Column(Integer, primary_key=True)
-#    ec2_id = Column(String(12), unique=True)
-#    user_id = Column(String(255))
-#    project_id = Column(String(255))
-#    image_type = Column(String(255))
-#    public = Column(Boolean, default=False)
-#    state = Column(String(255))
-#    location = Column(String(255))
-#    arch = Column(String(255))
-#    default_kernel_id = Column(String(255))
-#    default_ramdisk_id = Column(String(255))
-#
-#    @validates('image_type')
-#    def validate_image_type(self, key, image_type):
-#        assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw'])
-#
-#    @validates('state')
-#    def validate_state(self, key, state):
-#        assert(state in ['available', 'pending', 'disabled'])
-#
-#    @validates('default_kernel_id')
-#    def validate_kernel_id(self, key, val):
-#        if val != 'machine':
-#            assert(val is None)
-#
-#    @validates('default_ramdisk_id')
-#    def validate_ramdisk_id(self, key, val):
-#        if val != 'machine':
-#            assert(val is None)
-#
-#
-# TODO(vish): To make this into its own table, we need a good place to
-#             create the host entries. In config somwhere? Or the first
-#             time any object sets host? This only becomes particularly
-#             important if we need to store per-host data.
-#class Host(BASE, NovaBase):
-#    """Represents a host where services are running"""
-#    __tablename__ = 'hosts'
-#    id = Column(String(255), primary_key=True)
-
-
 class Service(BASE, NovaBase):
     """Represents a running service on a host."""
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 2a043cc6b..5d7589090 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -92,7 +92,7 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False,
 flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
                      'Seconds after which a deallocated ip is disassociated')
 
-flags.DEFINE_bool('use_ipv6', True,
+flags.DEFINE_bool('use_ipv6', False,
                   'use the ipv6')
 flags.DEFINE_string('network_host', socket.gethostname(),
                     'Network host to use for ip allocation in flat modes')
@@ -211,7 +211,7 @@ class NetworkManager(manager.Manager):
 
     def release_fixed_ip(self, context, mac, address):
         """Called by dhcp-bridge when ip is released."""
-        LOG.debug("Releasing IP %s", address, context=context)
+        LOG.debug(_("Releasing IP %s"), address, context=context)
         fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
         instance_ref = fixed_ip_ref['instance']
         if not instance_ref:
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index bc26fd3c5..43ed7ffe7 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -315,8 +315,10 @@ class ObjectResource(ErrorHandlingResource):
                   context=request.context)
 
         if not self.bucket.is_authorized(request.context):
-            LOG.audit("Unauthorized attempt to delete object %s from "
-                      "bucket %s", self.name, self.bucket.name,
+            LOG.audit(_("Unauthorized attempt to delete object "
+                        "%(object)s from bucket %(bucket)s") %
+                      {'object': self.name,
+                       'bucket': self.bucket.name},
                       context=request.context)
             raise exception.NotAuthorized()
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index abc28182e..41e0abd80 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -259,22 +259,25 @@ class Image(object):
                                 process_input=encrypted_key,
                                 check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt private key: %s" % err)
+            raise exception.Error(_("Failed to decrypt private key: %s")
+                                  % err)
         iv, err = utils.execute(
                 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
                 process_input=encrypted_iv,
                 check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt initialization "
-                                  "vector: %s" % err)
+            raise exception.Error(_("Failed to decrypt initialization "
+                                    "vector: %s") % err)
 
         _out, err = utils.execute(
                 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
                 % (encrypted_filename, key, iv, decrypted_filename),
                 check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt image file %s : %s" %
-                                  (encrypted_filename, err))
+            raise exception.Error(_("Failed to decrypt image file "
+                                    "%(image_file)s: %(err)s") %
+                                  {'image_file': encrypted_filename,
+                                   'err': err})
 
     @staticmethod
     def untarzip_image(path, filename):
diff --git a/nova/rpc.py b/nova/rpc.py
index 49b11602b..bbfa71138 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -343,7 +343,7 @@ def call(context, topic, msg):
 
 def cast(context, topic, msg):
     """Sends a message on a topic without waiting for a response"""
-    LOG.debug("Making asynchronous cast...")
+    LOG.debug(_("Making asynchronous cast..."))
     _pack_context(msg, context)
     conn = Connection.instance()
     publisher = TopicPublisher(connection=conn, topic=topic)
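Note the pattern in the objectstore hunks: once a translatable string carries two values, the merge switches from positional `%s` pairs to named placeholders fed by a dict, since translators may need to reorder them. In isolation, with `_()` installed as in the earlier sketch (the values here are illustrative only):

```python
encrypted_filename = '/tmp/image.part.enc'
err = 'bad decrypt'

msg = _("Failed to decrypt image file %(image_file)s: %(err)s") % {
        'image_file': encrypted_filename,
        'err': err}
```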
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 47baf0d73..baf4966d4 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -48,7 +48,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             service = db.service_get_by_args(context.elevated(), host,
                                              'nova-compute')
             if not self.service_is_up(service):
-                raise driver.WillNotSchedule("Host %s is not alive" % host)
+                raise driver.WillNotSchedule(_("Host %s is not alive") % host)
 
             # TODO(vish): this probably belongs in the manager, if we
             #             can generalize this somehow
@@ -80,7 +80,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             service = db.service_get_by_args(context.elevated(), host,
                                              'nova-volume')
             if not self.service_is_up(service):
-                raise driver.WillNotSchedule("Host %s not available" % host)
+                raise driver.WillNotSchedule(_("Host %s not available") % host)
 
             # TODO(vish): this probably belongs in the manager, if we
             #             can generalize this somehow
diff --git a/nova/service.py b/nova/service.py
index 8b2a22ce0..efc08fd63 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -209,19 +209,6 @@ class Service(object):
                 self.model_disconnected = True
                 logging.exception(_("model server went away"))
 
-        try:
-            # NOTE(vish): This is late-loaded to make sure that the
-            #             database is not created before flags have
-            #             been loaded.
-            from nova.db.sqlalchemy import models
-            models.register_models()
-        except OperationalError:
-            logging.exception(_("Data store %s is unreachable."
-                                " Trying again in %d seconds.") %
-                              (FLAGS.sql_connection,
-                               FLAGS.sql_retry_interval))
-            time.sleep(FLAGS.sql_retry_interval)
-
 
 def serve(*services):
     FLAGS(sys.argv)
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 8dc87d0e2..592d5bea9 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -34,3 +34,8 @@
 # The code below enables nosetests to work with i18n _() blocks
 import __builtin__
 setattr(__builtin__, '_', lambda x: x)
+
+
+def setup():
+    from nova.db import migration
+    migration.db_sync()
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 7376a11dd..1097488ec 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -40,3 +40,4 @@ FLAGS.blades_per_shelf = 4
 FLAGS.iscsi_num_targets = 8
 FLAGS.verbose = True
 FLAGS.sql_connection = 'sqlite:///nova.sqlite'
+FLAGS.use_ipv6 = True
diff --git a/nova/tests/glance/__init__.py b/nova/tests/glance/__init__.py
new file mode 100644
index 000000000..ef9fa05a7
--- /dev/null
+++ b/nova/tests/glance/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`glance` -- Stubs for Glance
+=================================
+"""
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
new file mode 100644
index 000000000..f182b857a
--- /dev/null
+++ b/nova/tests/glance/stubs.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import StringIO
+
+import glance.client
+
+
+def stubout_glance_client(stubs, cls):
+    """Stubs out glance.client.Client"""
+    stubs.Set(glance.client, 'Client',
+              lambda *args, **kwargs: cls(*args, **kwargs))
+
+
+class FakeGlance(object):
+    def __init__(self, host, port=None, use_ssl=False):
+        pass
+
+    def get_image(self, image):
+        meta = {
+            'size': 0,
+        }
+        image_file = StringIO.StringIO('')
+        return meta, image_file
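FakeGlance implements just enough of glance.client.Client for these tests, and stubout_glance_client() swaps it in wherever the real client would be constructed. Wiring it up mirrors what test_xenapi.py's setUp does further down (stubout here is the mox fixture nova's tests already use; the image id is arbitrary):

```python
import stubout

from nova.tests.glance import stubs as glance_stubs

stubs = stubout.StubOutForTesting()
glance_stubs.stubout_glance_client(stubs, glance_stubs.FakeGlance)

# Any code that now builds glance.client.Client gets the fake instead.
meta, image_file = glance_stubs.FakeGlance('localhost').get_image('ami-tiny')
```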
== 'sudo iptables-restore': + self.out_rules = process_input.split('\n') + return '', '' + if cmd == 'sudo ip6tables-restore': + self.out6_rules = process_input.split('\n') + return '', '' + self.fw.execute = fake_iptables_execute + + self.fw.prepare_instance_filter(instance_ref) + self.fw.apply_instance_filter(instance_ref) in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) for rule in in_rules: if not 'nova' in rule: - self.assertTrue(rule in out_rules, + self.assertTrue(rule in self.out_rules, 'Rule went missing: %s' % rule) instance_chain = None - for rule in out_rules: + for rule in self.out_rules: # This is pretty crude, but it'll do for now if '-d 10.11.12.13 -j' in rule: instance_chain = rule.split(' ')[-1] @@ -320,7 +340,7 @@ class IptablesFirewallTestCase(test.TestCase): self.assertTrue(instance_chain, "The instance chain wasn't added") security_group_chain = None - for rule in out_rules: + for rule in self.out_rules: # This is pretty crude, but it'll do for now if '-A %s -j' % instance_chain in rule: security_group_chain = rule.split(' ')[-1] @@ -329,16 +349,16 @@ class IptablesFirewallTestCase(test.TestCase): "The security group chain wasn't added") self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \ - security_group_chain in out_rules, + security_group_chain in self.out_rules, "ICMP acceptance rule wasn't added") - self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type' - ' 8 -j ACCEPT' % security_group_chain in out_rules, + self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type ' + '8 -j ACCEPT' % security_group_chain in self.out_rules, "ICMP Echo Request acceptance rule wasn't added") self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport ' '--dports 80:81 -j ACCEPT' % security_group_chain \ - in out_rules, + in self.out_rules, "TCP port 80/81 acceptance rule wasn't added") @@ -473,5 +493,6 @@ class NWFilterTestCase(test.TestCase): self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) _ensure_all_called() self.teardown_security_group() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 261ee0fde..9f5b266f3 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -34,6 +34,7 @@ from nova.virt.xenapi import volume_utils from nova.virt.xenapi.vmops import SimpleDH from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs +from nova.tests.glance import stubs as glance_stubs FLAGS = flags.FLAGS @@ -108,18 +109,16 @@ class XenAPIVolumeTestCase(test.TestCase): conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = db.instance_create(self.values) - xenapi_fake.create_vm(instance.name, 'Running') + vm = xenapi_fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc') def check(): # check that the VM has a VBD attached to it - # Get XenAPI reference for the VM - vms = xenapi_fake.get_all('VM') # Get XenAPI record for VBD vbds = xenapi_fake.get_all('VBD') vbd = xenapi_fake.get_record('VBD', vbds[0]) vm_ref = vbd['VM'] - self.assertEqual(vm_ref, vms[0]) + self.assertEqual(vm_ref, vm) check() @@ -157,9 +156,14 @@ class XenAPIVMTestCase(test.TestCase): FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' xenapi_fake.reset() + xenapi_fake.create_local_srs() db_fakes.stub_out_db_instance_api(self.stubs) xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) 
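The fake_iptables_execute hook in test_static_filters above is the pattern these tests use for code that shells out: canned output is returned for the *-save commands, and the input fed to the *-restore commands is captured for later assertions. A minimal, self-contained sketch of the same round-trip; every name in it (FakeFirewall, fake_execute, captured) is hypothetical, not a Nova API:

class FakeFirewall(object):
    # Stands in for the driver under test; it talks to iptables only
    # through the injected execute callable.
    def __init__(self, execute):
        self.execute = execute

    def refresh(self):
        saved, _err = self.execute('sudo iptables-save -t filter')
        rules = saved.split('\n')
        rules.insert(-1, '-A nova-test -j ACCEPT')  # keep COMMIT last
        self.execute('sudo iptables-restore', process_input='\n'.join(rules))

captured = {}

def fake_execute(cmd, process_input=None):
    # Canned output for the save command, capture for the restore command,
    # mirroring fake_iptables_execute in the test above.
    if cmd == 'sudo iptables-save -t filter':
        return '*filter\nCOMMIT', None
    if cmd == 'sudo iptables-restore':
        captured['rules'] = process_input.split('\n')
        return '', ''

FakeFirewall(fake_execute).refresh()
assert '-A nova-test -j ACCEPT' in captured['rules']

Capturing process_input keeps the assertion on the exact text the driver would have handed to iptables-restore, without requiring root or a real netfilter stack.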
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + stubs.stubout_get_this_vm_uuid(self.stubs) + stubs.stubout_stream_disk(self.stubs) + glance_stubs.stubout_glance_client(self.stubs, + glance_stubs.FakeGlance) self.conn = xenapi_conn.get_connection(False) def test_list_instances_0(self): @@ -207,40 +211,70 @@ class XenAPIVMTestCase(test.TestCase): check() - def test_spawn(self): - instance = self._create_instance() + def check_vm_record(self, conn): + instances = conn.list_instances() + self.assertEquals(instances, [1]) + + # Get Nova record for VM + vm_info = conn.get_info(1) + + # Get XenAPI record for VM + vms = [rec for ref, rec + in xenapi_fake.get_all_records('VM').iteritems() + if not rec['is_control_domain']] + vm = vms[0] + + # Check that m1.large above turned into the right thing. + instance_type = instance_types.INSTANCE_TYPES['m1.large'] + mem_kib = long(instance_type['memory_mb']) << 10 + mem_bytes = str(mem_kib << 10) + vcpus = instance_type['vcpus'] + self.assertEquals(vm_info['max_mem'], mem_kib) + self.assertEquals(vm_info['mem'], mem_kib) + self.assertEquals(vm['memory_static_max'], mem_bytes) + self.assertEquals(vm['memory_dynamic_max'], mem_bytes) + self.assertEquals(vm['memory_dynamic_min'], mem_bytes) + self.assertEquals(vm['VCPUs_max'], str(vcpus)) + self.assertEquals(vm['VCPUs_at_startup'], str(vcpus)) + + # Check that the VM is running according to Nova + self.assertEquals(vm_info['state'], power_state.RUNNING) + + # Check that the VM is running according to XenAPI. + self.assertEquals(vm['power_state'], 'Running') + + def _test_spawn(self, image_id, kernel_id, ramdisk_id): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + values = {'name': 1, + 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': image_id, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + conn = xenapi_conn.get_connection(False) + instance = db.instance_create(values) + conn.spawn(instance) + self.check_vm_record(conn) - def check(): - instances = self.conn.list_instances() - self.assertEquals(instances, [1]) - - # Get Nova record for VM - vm_info = self.conn.get_info(1) - - # Get XenAPI record for VM - vms = xenapi_fake.get_all('VM') - vm = xenapi_fake.get_record('VM', vms[0]) - - # Check that m1.large above turned into the right thing. - instance_type = instance_types.INSTANCE_TYPES['m1.large'] - mem_kib = long(instance_type['memory_mb']) << 10 - mem_bytes = str(mem_kib << 10) - vcpus = instance_type['vcpus'] - self.assertEquals(vm_info['max_mem'], mem_kib) - self.assertEquals(vm_info['mem'], mem_kib) - self.assertEquals(vm['memory_static_max'], mem_bytes) - self.assertEquals(vm['memory_dynamic_max'], mem_bytes) - self.assertEquals(vm['memory_dynamic_min'], mem_bytes) - self.assertEquals(vm['VCPUs_max'], str(vcpus)) - self.assertEquals(vm['VCPUs_at_startup'], str(vcpus)) - - # Check that the VM is running according to Nova - self.assertEquals(vm_info['state'], power_state.RUNNING) - - # Check that the VM is running according to XenAPI. 
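The check_vm_record helper above derives its expected memory values from instance_type['memory_mb'] with two left-shifts: <<10 takes MiB to KiB, a second <<10 takes KiB to bytes, and the byte value is stringified because XenAPI VM records store memory fields as strings. A quick worked check of that arithmetic; the 8192 MB figure for m1.large is an assumption, not something this diff states:

# MiB -> KiB -> bytes via left shifts, as in check_vm_record.
memory_mb = 8192                 # assumed m1.large memory
mem_kib = memory_mb << 10        # 8388608
mem_bytes = str(mem_kib << 10)   # '8589934592'; XenAPI wants a string
assert mem_kib == 8192 * 1024
assert mem_bytes == str(8192 * 1024 * 1024)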
- self.assertEquals(vm['power_state'], 'Running') + def test_spawn_raw_objectstore(self): + FLAGS.xenapi_image_service = 'objectstore' + self._test_spawn(1, None, None) - check() + def test_spawn_objectstore(self): + FLAGS.xenapi_image_service = 'objectstore' + self._test_spawn(1, 2, 3) + + def test_spawn_raw_glance(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn(1, None, None) + + def test_spawn_glance(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn(1, 2, 3) def tearDown(self): super(XenAPIVMTestCase, self).tearDown() diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 292bd9ba9..624995ada 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -115,6 +115,21 @@ def stub_out_get_target(stubs): stubs.Set(volume_utils, '_get_target', fake_get_target) +def stubout_get_this_vm_uuid(stubs): + def f(): + vms = [rec['uuid'] for ref, rec + in fake.get_all_records('VM').iteritems() + if rec['is_control_domain']] + return vms[0] + stubs.Set(vm_utils, 'get_this_vm_uuid', f) + + +def stubout_stream_disk(stubs): + def f(_1, _2, _3, _4): + pass + stubs.Set(vm_utils, '_stream_disk', f) + + class FakeSessionForVMTests(fake.SessionBase): """ Stubs out a XenAPISession for VM tests """ def __init__(self, uri): @@ -124,7 +139,10 @@ class FakeSessionForVMTests(fake.SessionBase): return self.xenapi.network.get_all_records() def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' + sr_ref = fake.get_all('SR')[0] + vdi_ref = fake.create_vdi('', False, sr_ref, False) + vdi_rec = fake.get_record('VDI', vdi_ref) + return '<string>%s</string>' % vdi_rec['uuid'] def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref) @@ -159,10 +177,6 @@ class FakeSessionForVolumeTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVolumeTests, self).__init__(uri) - def VBD_plug(self, _1, ref): - rec = fake.get_record('VBD', ref) - rec['currently-attached'] = True - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): valid_vdi = False diff --git a/nova/twistd.py b/nova/twistd.py index 556271999..6390a8144 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -156,7 +156,7 @@ def WrapTwistedOptions(wrapped): try: self.parseArgs(*argv) except TypeError: - raise usage.UsageError("Wrong number of arguments.") + raise usage.UsageError(_("Wrong number of arguments.")) self.postOptions() return args @@ -220,7 +220,7 @@ def stop(pidfile): time.sleep(0.1) except OSError, err: err = str(err) - if err.find("No such process") > 0: + if err.find(_("No such process")) > 0: if os.path.exists(pidfile): os.remove(pidfile) else: diff --git a/nova/virt/fake.py b/nova/virt/fake.py index a57a8f43b..f8b3c7807 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -310,6 +310,54 @@ class FakeConnection(object): 'username': 'fakeuser', 'password': 'fakepassword'} + def refresh_security_group_rules(self, security_group_id): + """This method is called after a change to security groups. + + All security groups and their associated rules live in the datastore, + and calling this method should apply the updated rules to instances + running the specified security group. + + An error should be raised if the operation cannot complete. + + """ + return True + + def refresh_security_group_members(self, security_group_id): + """This method is called when a security group is added to an instance. 
+
+ This message is sent to the virtualization drivers on hosts that are
+ running an instance that belongs to a security group that has a rule
+ that references the security group identified by `security_group_id`.
+ It is the responsibility of this method to make sure any rules
+ that authorize traffic flow with members of the security group are
+ updated and any new members can communicate, and any removed members
+ cannot.
+
+ Scenario:
+ * we are running on host 'H0' and we have an instance 'i-0'.
+ * instance 'i-0' is a member of security group 'speaks-b'
+ * group 'speaks-b' has an ingress rule that authorizes group 'b'
+ * another host 'H1' runs an instance 'i-1'
+ * instance 'i-1' is a member of security group 'b'
+
+ When 'i-1' launches or terminates we will receive the message
+ to update members of group 'b', at which time we will make
+ any changes needed to the rules for instance 'i-0' to allow
+ or deny traffic coming from 'i-1', depending on whether it is being
+ added or removed from the group.
+
+ In this scenario, 'i-1' could just as easily have been running on our
+ host 'H0' and this method would still have been called. The point is
+ that this method isn't called on the host where instances of that
+ group are running (as is the case with
+ :method:`refresh_security_group_rules`) but is called where references
+ are made to authorizing those instances.
+
+ An error should be raised if the operation cannot complete.
+
+ """
+ return True
+
 class FakeInstance(object):
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 3ec82e403..88bfbc668 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -18,10 +18,10 @@
 #set $disk_prefix = 'vd'
 #set $disk_bus = 'virtio'
 <type>hvm</type>
- #end if
+ #end if
 #if $getVar('rescue', False)
- <kernel>${basepath}/rescue-kernel</kernel>
- <initrd>${basepath}/rescue-ramdisk</initrd>
+ <kernel>${basepath}/kernel.rescue</kernel>
+ <initrd>${basepath}/ramdisk.rescue</initrd>
 #else
 #if $getVar('kernel', None)
 <kernel>${kernel}</kernel>
@@ -47,7 +47,7 @@
 #if $getVar('rescue', False)
 <disk type='file'>
 <driver type='${driver_type}'/>
- <source file='${basepath}/rescue-disk'/>
+ <source file='${basepath}/disk.rescue'/>
 <target dev='${disk_prefix}a' bus='${disk_bus}'/>
 </disk>
 <disk type='file'>
@@ -64,7 +64,7 @@
 #if $getVar('local', False)
 <disk type='file'>
 <driver type='${driver_type}'/>
- <source file='${basepath}/local'/>
+ <source file='${basepath}/disk.local'/>
 <target dev='${disk_prefix}b' bus='${disk_bus}'/>
 </disk>
 #end if
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 55c193e20..8ad83731f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -350,7 +350,7 @@ class LibvirtConnection(object):
 rescue_images = {'image_id': FLAGS.rescue_image_id,
 'kernel_id': FLAGS.rescue_kernel_id,
 'ramdisk_id': FLAGS.rescue_ramdisk_id}
- self._create_image(instance, xml, 'rescue-', rescue_images)
+ self._create_image(instance, xml, '.rescue', rescue_images)
 self._conn.createXML(xml, 0)
 timer = utils.LoopingCall(f=None)
@@ -532,23 +532,23 @@ class LibvirtConnection(object):
 utils.execute('truncate %s -s %dG' % (target, local_gb))
 # TODO(vish): should we format disk by default?
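The libvirt changes above move rescue and local files from a 'rescue-' style prefix to a '.rescue'/'.local' style suffix, and the _create_image hunk that follows rewires its basepath helper to match, so disk, disk.rescue and disk.local share one stem inside the instance directory. A sketch of that suffix-style closure with stand-in paths; make_basepath and the sample values are illustrative, not the exact Nova code:

import os

def make_basepath(instances_path, instance_name, suffix=''):
    # Default-argument closure, as in _create_image: callers can override
    # the suffix per file (e.g. '' for console.log).
    def basepath(fname='', suffix=suffix):
        return os.path.join(instances_path, instance_name, fname + suffix)
    return basepath

basepath = make_basepath('/var/lib/nova/instances', 'instance-00000001',
                         suffix='.rescue')
print basepath('disk')             # .../instance-00000001/disk.rescue
print basepath('console.log', '')  # suffix suppressed for console.log

Putting the variant after the filename is what lets a plain directory listing group all of an instance's disks together, which the old prefix scheme did not.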
- def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
+ def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None):
 # syntactic nicety
- def basepath(fname='', prefix=prefix):
+ def basepath(fname='', suffix=suffix):
 return os.path.join(FLAGS.instances_path,
 inst['name'],
- prefix + fname)
+ fname + suffix)
 # ensure directories exist and are writable
- utils.execute('mkdir -p %s' % basepath(prefix=''))
- utils.execute('chmod 0777 %s' % basepath(prefix=''))
+ utils.execute('mkdir -p %s' % basepath(suffix=''))
+ utils.execute('chmod 0777 %s' % basepath(suffix=''))
 LOG.info(_('instance %s: Creating image'), inst['name'])
 f = open(basepath('libvirt.xml'), 'w')
 f.write(libvirt_xml)
 f.close()
- # NOTE(vish): No need add the prefix to console.log
+ # NOTE(vish): No need to add the suffix to console.log
 os.close(os.open(basepath('console.log', ''),
 os.O_CREAT | os.O_WRONLY, 0660))
@@ -577,7 +577,7 @@ class LibvirtConnection(object):
 root_fname = disk_images['image_id']
 size = FLAGS.minimum_root_size
- if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
+ if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
 size = None
 root_fname += "_sm"
@@ -593,7 +593,7 @@ class LibvirtConnection(object):
 if type_data['local_gb']:
 self._cache_image(fn=self._create_local,
- target=basepath('local'),
+ target=basepath('disk.local'),
 fname="local_%s" % type_data['local_gb'],
 cow=FLAGS.use_cow_images,
 local_gb=type_data['local_gb'])
@@ -733,7 +733,8 @@ class LibvirtConnection(object):
 'cpu_time': cpu_time}
 def get_diagnostics(self, instance_name):
- raise exception.APIError("diagnostics are not supported for libvirt")
+ raise exception.APIError(_("diagnostics are not supported "
+ "for libvirt"))
 def get_disks(self, instance_name):
 """
@@ -1133,6 +1134,10 @@ class NWFilterFirewall(FirewallDriver):
 return
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
 def refresh_security_group_rules(self, security_group_id):
 return self._define_filter(
 self.security_group_to_nwfilter_xml(security_group_id))
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 96d8f5fc8..4bfaf4b57 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -76,6 +76,7 @@ def reset():
 for c in _CLASSES:
 _db_content[c] = {}
 create_host('fake')
+ create_vm('fake', 'Running', is_a_template=False, is_control_domain=True)
 def create_host(name_label):
@@ -136,14 +137,21 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
 def create_vbd(vm_ref, vdi_ref):
- vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
+ vbd_rec = {
+ 'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'currently_attached': False,
+ }
 vbd_ref = _create_object('VBD', vbd_rec)
 after_VBD_create(vbd_ref, vbd_rec)
 return vbd_ref
 def after_VBD_create(vbd_ref, vbd_rec):
- """Create backref from VM to VBD when VBD is created"""
+ """Create read-only fields and backref from VM to VBD when VBD is
+ created."""
+ vbd_rec['currently_attached'] = False
+ vbd_rec['device'] = ''
 vm_ref = vbd_rec['VM']
 vm_rec = _db_content['VM'][vm_ref]
 vm_rec['VBDs'] = [vbd_ref]
@@ -152,9 +160,10 @@ def after_VBD_create(vbd_ref, vbd_rec):
 vbd_rec['vm_name_label'] = vm_name_label
-def create_pbd(config, sr_ref, attached):
+def create_pbd(config, host_ref, sr_ref, attached):
 return _create_object('PBD', {
 'device-config': config,
+ 'host': host_ref,
 'SR': sr_ref,
 'currently-attached': attached,
 })
@@ -167,6 +176,33 @@ def create_task(name_label):
 })
+def create_local_srs():
+ """Create an SR that looks like the one created on the local disk by
+ default by the XenServer installer. Do this once per host."""
+ for host_ref in _db_content['host'].keys():
+ _create_local_sr(host_ref)
+
+
+def _create_local_sr(host_ref):
+ sr_ref = _create_object('SR', {
+ 'name_label': 'Local storage',
+ 'type': 'lvm',
+ 'content_type': 'user',
+ 'shared': False,
+ 'physical_size': str(1 << 30),
+ 'physical_utilisation': str(0),
+ 'virtual_allocation': str(0),
+ 'other_config': {
+ 'i18n-original-value-name_label': 'Local storage',
+ 'i18n-key': 'local-storage',
+ },
+ 'VDIs': []
+ })
+ pbd_ref = create_pbd('', host_ref, sr_ref, True)
+ _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+ return sr_ref
+
+
 def _create_object(table, obj):
 ref = str(uuid.uuid4())
 obj['uuid'] = str(uuid.uuid4())
@@ -179,9 +215,10 @@ def _create_sr(table, obj):
 # Forces fake to support iscsi only
 if sr_type != 'iscsi':
 raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
+ host_ref = _db_content['host'].keys()[0]
 sr_ref = _create_object(table, obj[2])
 vdi_ref = create_vdi('', False, sr_ref, False)
- pbd_ref = create_pbd('', sr_ref, True)
+ pbd_ref = create_pbd('', host_ref, sr_ref, True)
 _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
 _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
 _db_content['VDI'][vdi_ref]['SR'] = sr_ref
@@ -233,6 +270,20 @@ class SessionBase(object):
 def __init__(self, uri):
 self._session = None
+ def VBD_plug(self, _1, ref):
+ rec = get_record('VBD', ref)
+ if rec['currently_attached']:
+ raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
+ rec['currently_attached'] = True
+ rec['device'] = rec['userdevice']
+
+ def VBD_unplug(self, _1, ref):
+ rec = get_record('VBD', ref)
+ if not rec['currently_attached']:
+ raise Failure(['DEVICE_ALREADY_DETACHED', ref])
+ rec['currently_attached'] = False
+ rec['device'] = ''
+
 def xenapi_request(self, methodname, params):
 if methodname.startswith('login'):
self._login(methodname, params) @@ -289,6 +340,8 @@ class SessionBase(object): return lambda *params: self._getter(name, params) elif self._is_create(name): return lambda *params: self._create(name, params) + elif self._is_destroy(name): + return lambda *params: self._destroy(name, params) else: return None @@ -299,10 +352,16 @@ class SessionBase(object): bits[1].startswith(getter and 'get_' or 'set_')) def _is_create(self, name): + return self._is_method(name, 'create') + + def _is_destroy(self, name): + return self._is_method(name, 'destroy') + + def _is_method(self, name, meth): bits = name.split('.') return (len(bits) == 2 and bits[0] in _CLASSES and - bits[1] == 'create') + bits[1] == meth) def _getter(self, name, params): self._check_session(params) @@ -370,10 +429,9 @@ class SessionBase(object): _create_sr(cls, params) or _create_object(cls, params[1]) # Call hook to provide any fixups needed (ex. creating backrefs) - try: - globals()["after_%s_create" % cls](ref, params[1]) - except KeyError: - pass + after_hook = 'after_%s_create' % cls + if after_hook in globals(): + globals()[after_hook](ref, params[1]) obj = get_record(cls, ref) @@ -383,6 +441,15 @@ class SessionBase(object): return ref + def _destroy(self, name, params): + self._check_session(params) + self._check_arg_count(params, 2) + table, _ = name.split('.') + ref = params[1] + if ref not in _db_content[table]: + raise Failure(['HANDLE_INVALID', table, ref]) + del _db_content[table][ref] + def _async(self, name, params): task_ref = create_task(name) task = _db_content['task'][task_ref] @@ -420,7 +487,7 @@ class SessionBase(object): try: return result[0] except IndexError: - return None + raise Failure(['UUID_INVALID', v, result, recs, k]) return result diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index eb0393d2a..b80ff4dba 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,11 +19,14 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. 
""" +import os import pickle +import re import urllib from xml.dom import minidom from eventlet import event +import glance.client from nova import exception from nova import flags from nova import log as logging @@ -47,17 +50,23 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE +KERNEL_DIR = '/boot/guest' + + class ImageType: - """ - Enumeration class for distinguishing different image types - 0 - kernel/ramdisk image (goes on dom0's filesystem) - 1 - disk image (local SR, partitioned by objectstore plugin) - 2 - raw disk image (local SR, NOT partitioned by plugin) - """ + """ + Enumeration class for distinguishing different image types + 0 - kernel/ramdisk image (goes on dom0's filesystem) + 1 - disk image (local SR, partitioned by objectstore plugin) + 2 - raw disk image (local SR, NOT partitioned by plugin) + """ - KERNEL_RAMDISK = 0 - DISK = 1 - DISK_RAW = 2 + KERNEL_RAMDISK = 0 + DISK = 1 + DISK_RAW = 2 class VMHelper(HelperBase): @@ -207,6 +216,25 @@ class VMHelper(HelperBase): return vif_ref @classmethod + def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): + """Create a VDI record and returns its reference.""" + vdi_ref = session.get_xenapi().VDI.create( + {'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': []}) + LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, + name_label, virtual_size, read_only, sr_ref) + return vdi_ref + + @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, Snapshot VHD @@ -256,15 +284,71 @@ class VMHelper(HelperBase): def fetch_image(cls, session, instance_id, image, user, project, type): """ type is interpreted as an ImageType instance + Related flags: + xenapi_image_service = ['glance', 'objectstore'] + glance_address = 'address for glance services' + glance_port = 'port for glance services' """ - url = images.image_url(image) access = AuthManager().get_access_key(user, project) + + if FLAGS.xenapi_image_service == 'glance': + return cls._fetch_image_glance(session, instance_id, image, + access, type) + else: + return cls._fetch_image_objectstore(session, instance_id, image, + access, user.secret, type) + + @classmethod + def _fetch_image_glance(cls, session, instance_id, image, access, type): + sr = find_sr(session) + if sr is None: + raise exception.NotFound('Cannot find SR to write VDI to') + + c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + + meta, image_file = c.get_image(image) + virtual_size = int(meta['size']) + vdi_size = virtual_size + LOG.debug(_("Size for image %s:%d"), image, virtual_size) + if type == ImageType.DISK: + # Make room for MBR. 
+ vdi_size += MBR_SIZE_BYTES + + vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, + vdi_size, False) + + with_vdi_attached_here(session, vdi, False, + lambda dev: + _stream_disk(dev, type, + virtual_size, image_file)) + if (type == ImageType.KERNEL_RAMDISK): + #we need to invoke a plugin for copying VDI's + #content into proper path + LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) + fn = "copy_kernel_vdi" + args = {} + args['vdi-ref'] = vdi + #let the plugin copy the correct number of bytes + args['image-size'] = str(vdi_size) + task = session.async_call_plugin('glance', fn, args) + filename = session.wait_for_task(instance_id, task) + #remove the VDI as it is not needed anymore + session.get_xenapi().VDI.destroy(vdi) + LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi) + return filename + else: + return session.get_xenapi().VDI.get_uuid(vdi) + + @classmethod + def _fetch_image_objectstore(cls, session, instance_id, image, access, + secret, type): + url = images.image_url(image) LOG.debug(_("Asking xapi to fetch %s as %s"), url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url args['username'] = access - args['password'] = user.secret + args['password'] = secret args['add_partition'] = 'false' args['raw'] = 'false' if type != ImageType.KERNEL_RAMDISK: @@ -276,14 +360,21 @@ class VMHelper(HelperBase): return uuid @classmethod - def lookup_image(cls, session, vdi_ref): + def lookup_image(cls, session, instance_id, vdi_ref): + if FLAGS.xenapi_image_service == 'glance': + return cls._lookup_image_glance(session, vdi_ref) + else: + return cls._lookup_image_objectstore(session, instance_id, vdi_ref) + + @classmethod + def _lookup_image_objectstore(cls, session, instance_id, vdi_ref): LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref - #TODO: Call proper function in plugin task = session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(task) + pv_str = session.wait_for_task(instance_id, task) + pv = None if pv_str.lower() == 'true': pv = True elif pv_str.lower() == 'false': @@ -292,6 +383,23 @@ class VMHelper(HelperBase): return pv @classmethod + def _lookup_image_glance(cls, session, vdi_ref): + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) + + def is_vdi_pv(dev): + LOG.debug(_("Running pygrub against %s"), dev) + output = os.popen('pygrub -qn /dev/%s' % dev) + for line in output.readlines(): + #try to find kernel string + m = re.search('(?<=kernel:)/.*(?:>)', line) + if m and m.group(0).find('xen') != -1: + LOG.debug(_("Found Xen kernel %s") % m.group(0)) + return True + LOG.debug(_("No Xen kernel found. 
Booting HVM.")) + return False + return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) + + @classmethod def lookup(cls, session, i): """Look the instance i up, and returns it if available""" vms = session.get_xenapi().VM.get_by_name_label(i) @@ -464,3 +572,123 @@ def get_vdi_for_vm_safely(session, vm_ref): vdi_ref = vdi_refs[0] vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) return vdi_ref, vdi_rec + + +def find_sr(session): + host = session.get_xenapi_host() + srs = session.get_xenapi().SR.get_all() + for sr in srs: + sr_rec = session.get_xenapi().SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.get_xenapi().PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def with_vdi_attached_here(session, vdi, read_only, f): + this_vm_ref = get_this_vm_ref(session) + vbd_rec = {} + vbd_rec['VM'] = this_vm_ref + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + LOG.debug(_('Creating VBD for VDI %s ... '), vdi) + vbd = session.get_xenapi().VBD.create(vbd_rec) + LOG.debug(_('Creating VBD for VDI %s done.'), vdi) + try: + LOG.debug(_('Plugging VBD %s ... '), vbd) + session.get_xenapi().VBD.plug(vbd) + LOG.debug(_('Plugging VBD %s done.'), vbd) + return f(session.get_xenapi().VBD.get_device(vbd)) + finally: + LOG.debug(_('Destroying VBD for VDI %s ... '), vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.get_xenapi().VBD.destroy, vbd) + LOG.debug(_('Destroying VBD for VDI %s done.'), vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. 
For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.get_xenapi().VBD.unplug(vbd) + LOG.debug(_('VBD.unplug successful first time.')) + return + except VMHelper.XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + LOG.debug(_('VBD.unplug rejected: retrying...')) + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + LOG.debug(_('VBD.unplug successful eventually.')) + return + else: + LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), + e) + return + + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except VMHelper.XenAPI.Failure, e: + LOG.error(_('Ignoring XenAPI.Failure %s'), e) + return None + + +def get_this_vm_uuid(): + with file('/sys/hypervisor/uuid') as f: + return f.readline().strip() + + +def get_this_vm_ref(session): + return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) + + +def _stream_disk(dev, type, virtual_size, image_file): + offset = 0 + if type == ImageType.DISK: + offset = MBR_SIZE_BYTES + _write_partition(virtual_size, dev) + + with open('/dev/%s' % dev, 'wb') as f: + f.seek(offset) + for chunk in image_file: + f.write(chunk) + + +def _write_partition(virtual_size, dev): + dest = '/dev/%s' % dev + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + LOG.debug(_('Writing partition table %d %d to %s...'), + primary_first, primary_last, dest) + + def execute(cmd, process_input=None, check_exit_code=True): + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) + + execute('parted --script %s mklabel msdos' % dest) + execute('parted --script %s mkpart primary %ds %ds' % + (dest, primary_first, primary_last)) + + LOG.debug(_('Writing partition table %s done.'), dest) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5e414bab4..6c2fd6a68 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -85,7 +85,8 @@ class VMOps(object): #Have a look at the VDI and see if it has a PV kernel pv_kernel = False if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, vdi_ref) + pv_kernel = VMHelper.lookup_image(self._session, instance.id, + vdi_ref) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c98310dbc..c57c883c9 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -89,6 +89,9 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') +flags.DEFINE_string('xenapi_image_service', + 'glance', + 'Where to get VM images: glance or objectstore.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, 'The interval used for polling of coalescing vhds.' 
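In the glance path above, a DISK image's VDI is over-allocated by MBR_SIZE_BYTES and _stream_disk writes the image data at that offset, leaving the first 63 sectors free for the msdos label that _write_partition lays down with parted. The geometry works out as follows; the 10 GiB size is only an example, not taken from this diff:

# Worked example of the layout used by _stream_disk/_write_partition,
# with the vm_utils constants (512-byte sectors, 63-sector MBR region).
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE   # 32256 bytes reserved

virtual_size = 10 * 1024 ** 3                     # hypothetical 10 GiB image
primary_first = MBR_SIZE_SECTORS                  # partition starts at sector 63
primary_last = MBR_SIZE_SECTORS + (virtual_size // SECTOR_SIZE) - 1

assert MBR_SIZE_BYTES == 32256
assert (primary_last - primary_first + 1) * SECTOR_SIZE == virtual_size
assert primary_last == 20971582

So the primary partition holds exactly virtual_size bytes, and the VDI, sized at virtual_size + MBR_SIZE_BYTES, has just enough room for both the label and the data.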
diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 71fe18a40..da7307733 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -47,7 +47,7 @@ flags.DEFINE_integer('iscsi_num_targets', 'Number of iscsi target ids per host') flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:', 'prefix for iscsi volumes') -flags.DEFINE_string('iscsi_ip_prefix', '127.0', +flags.DEFINE_string('iscsi_ip_prefix', '$my_ip', 'discover volumes on the ip that starts with this prefix') flags.DEFINE_string('rbd_pool', 'rbd', 'the rbd pool in which volumes are stored') @@ -100,6 +100,14 @@ class VolumeDriver(object): def delete_volume(self, volume): """Deletes a logical volume.""" + try: + self._try_execute("sudo lvdisplay %s/%s" % + (FLAGS.volume_group, + volume['name'])) + except Exception as e: + # If the volume isn't present, then don't attempt to delete + return True + self._try_execute("sudo lvremove -f %s/%s" % (FLAGS.volume_group, volume['name'])) @@ -218,8 +226,14 @@ class ISCSIDriver(VolumeDriver): def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" - iscsi_target = self.db.volume_get_iscsi_target_num(context, + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) + except exception.NotFound: + LOG.info(_("Skipping ensure_export. No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) self._sync_exec("sudo ietadm --op new " @@ -258,8 +272,23 @@ class ISCSIDriver(VolumeDriver): def remove_export(self, context, volume): """Removes an export for a logical volume.""" - iscsi_target = self.db.volume_get_iscsi_target_num(context, + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + + try: + # ietadm show will exit with an error + # this export has already been removed + self._execute("sudo ietadm --op show --tid=%s " % iscsi_target) + except Exception as e: + LOG.info(_("Skipping remove_export. 
No iscsi_target " +
+ "is presently exported for volume: %d"), volume['id'])
+ return
+
 self._execute("sudo ietadm --op delete --tid=%s "
 "--lun=0" % iscsi_target)
 self._execute("sudo ietadm --op delete --tid=%s" %
@@ -285,7 +314,8 @@ class ISCSIDriver(VolumeDriver):
 self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
 "-n node.startup -v automatic" %
 (iscsi_name, iscsi_portal))
- return "/dev/iscsi/%s" % volume['name']
+ return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal,
+ iscsi_name)
 def undiscover_volume(self, volume):
 """Undiscover volume on a remote host."""
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 6348539c5..82e3521a8 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -84,7 +84,10 @@ class VolumeManager(manager.Manager):
 volumes = self.db.volume_get_all_by_host(ctxt, self.host)
 LOG.debug(_("Re-exporting %s volumes"), len(volumes))
 for volume in volumes:
- self.driver.ensure_export(ctxt, volume)
+ if volume['status'] in ['available', 'in-use']:
+ self.driver.ensure_export(ctxt, volume)
+ else:
+ LOG.info(_("volume %s: skipping export"), volume['name'])
 def create_volume(self, context, volume_id):
 """Creates and exports the volume."""
@@ -99,12 +102,18 @@ class VolumeManager(manager.Manager):
 # before passing it to the driver.
 volume_ref['host'] = self.host
- LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'],
- volume_ref['size'])
- self.driver.create_volume(volume_ref)
+ try:
+ LOG.debug(_("volume %s: creating lv of size %sG"),
+ volume_ref['name'],
+ volume_ref['size'])
+ self.driver.create_volume(volume_ref)
- LOG.debug(_("volume %s: creating export"), volume_ref['name'])
- self.driver.create_export(context, volume_ref)
+ LOG.debug(_("volume %s: creating export"), volume_ref['name'])
+ self.driver.create_export(context, volume_ref)
+ except Exception as e:
+ self.db.volume_update(context,
+ volume_ref['id'], {'status': 'error'})
+ raise e
 now = datetime.datetime.utcnow()
 self.db.volume_update(context,
@@ -121,10 +130,18 @@ class VolumeManager(manager.Manager):
 raise exception.Error(_("Volume is still attached"))
 if volume_ref['host'] != self.host:
 raise exception.Error(_("Volume is not local to this node"))
- LOG.debug(_("volume %s: removing export"), volume_ref['name'])
- self.driver.remove_export(context, volume_ref)
- LOG.debug(_("volume %s: deleting"), volume_ref['name'])
- self.driver.delete_volume(volume_ref)
+
+ try:
+ LOG.debug(_("volume %s: removing export"), volume_ref['name'])
+ self.driver.remove_export(context, volume_ref)
+ LOG.debug(_("volume %s: deleting"), volume_ref['name'])
+ self.driver.delete_volume(volume_ref)
+ except Exception as e:
+ self.db.volume_update(context,
+ volume_ref['id'],
+ {'status': 'error_deleting'})
+ raise e
+
 self.db.volume_destroy(context, volume_id)
 LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
 return True
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 4f5307d80..a48bede9c 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -143,7 +143,7 @@ class Application(object):
 See the end of
 http://pythonpaste.org/webob/modules/dec.html
 for more info.
 """
- raise NotImplementedError("You must implement __call__")
+ raise NotImplementedError(_("You must implement __call__"))
 class Middleware(Application):
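The volume manager hunks above share one pattern: wrap the driver calls, and on any failure record a terminal 'error' or 'error_deleting' status before re-raising, so a volume can no longer be stranded in 'creating' or 'deleting'. A condensed sketch of that pattern; update(), create() and export() here are stand-ins for self.db.volume_update and the driver calls, not Nova APIs:

def create_volume_with_status(volume_id, update, create, export):
    # create/export stand in for driver.create_volume/create_export.
    try:
        create()
        export()
    except Exception:
        # Record a state an operator can act on, then let the error
        # propagate to the caller.
        update(volume_id, {'status': 'error'})
        raise
    update(volume_id, {'status': 'available'})

Re-raising keeps the failure visible to the RPC caller, while the row itself ends in a state that tooling can query and retry.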
