Diffstat (limited to 'nova')
27 files changed, 1071 insertions, 151 deletions
diff --git a/nova/__init__.py b/nova/__init__.py index 8745617bc..256db55a9 100644 --- a/nova/__init__.py +++ b/nova/__init__.py @@ -30,5 +30,3 @@ .. moduleauthor:: Manish Singh <yosh@gimp.org> .. moduleauthor:: Andy Smith <andy@anarkystic.com> """ - -from exception import * diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 99b6d5cb6..7458d307a 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -198,8 +198,9 @@ class CloudController(object): return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): - enabled_services = db.service_get_all(context) - disabled_services = db.service_get_all(context, True) + ctxt = context.elevated() + enabled_services = db.service_get_all(ctxt) + disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: @@ -318,14 +319,19 @@ class CloudController(object): def describe_security_groups(self, context, group_name=None, **kwargs): self.compute_api.ensure_default_security_group(context) - if context.is_admin: + if group_name: + groups = [] + for name in group_name: + group = db.security_group_get_by_name(context, + context.project_id, + name) + groups.append(group) + elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] - if not group_name is None: - groups = [g for g in groups if g.name in group_name] return {'securityGroupInfo': list(sorted(groups, @@ -670,7 +676,8 @@ class CloudController(object): instances = [] for ec2_id in instance_id: internal_id = ec2_id_to_id(ec2_id) - instance = self.compute_api.get(context, internal_id) + instance = self.compute_api.get(context, + instance_id=internal_id) instances.append(instance) else: instances = self.compute_api.get_all(context, **kwargs) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index c3fe0cc8c..1dfdd5318 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -120,8 +120,8 @@ class AuthMiddleware(wsgi.Middleware): req - webob.Request object """ ctxt = context.get_admin_context() - user = self.auth.get_user_from_access_key(username) - if user and user.secret == key: + user = self.auth.get_user_from_access_key(key) + if user and user.name == username: token_hash = hashlib.sha1('%s%s%f' % (username, key, time.time())).hexdigest() token_dict = {} diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 48c2270cb..ec2053d96 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -63,23 +63,21 @@ def _translate_detail_keys(inst): inst_dict['addresses'] = dict(public=[], private=[]) # grab single private fixed ip - try: - private_ip = inst['fixed_ip']['address'] - if private_ip: - inst_dict['addresses']['private'].append(private_ip) - except KeyError: - LOG.debug(_("Failed to read private ip")) + private_ips = utils.get_from_path(inst, 'fixed_ip/address') + inst_dict['addresses']['private'] = private_ips # grab all public floating ips - try: - for floating in inst['fixed_ip']['floating_ips']: - inst_dict['addresses']['public'].append(floating['address']) - except KeyError: - LOG.debug(_("Failed to read public ip(s)")) + public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address') + inst_dict['addresses']['public'] = public_ips - inst_dict['metadata'] = {} inst_dict['hostId'] = '' + # 
Return the metadata as a dictionary + metadata = {} + for item in inst['metadata']: + metadata[item['key']] = item['value'] + inst_dict['metadata'] = metadata + return dict(server=inst_dict) @@ -147,9 +145,10 @@ class Controller(wsgi.Controller): try: return image['properties'][param] except KeyError: - raise exception.NotFound( + LOG.debug( _("%(param)s property not found for image %(_image_id)s") % locals()) + return None image_id = str(image_id) image = self._image_service.show(req.environ['nova.context'], image_id) @@ -161,18 +160,29 @@ class Controller(wsgi.Controller): if not env: return faults.Fault(exc.HTTPUnprocessableEntity()) - key_pairs = auth_manager.AuthManager.get_key_pairs( - req.environ['nova.context']) + context = req.environ['nova.context'] + key_pairs = auth_manager.AuthManager.get_key_pairs(context) if not key_pairs: raise exception.NotFound(_("No keypairs defined")) key_pair = key_pairs[0] image_id = common.get_image_id_from_image_hash(self._image_service, - req.environ['nova.context'], env['server']['imageId']) + context, env['server']['imageId']) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) + + # Metadata is a list, not a Dictionary, because we allow duplicate keys + # (even though JSON can't encode this) + # In future, we may not allow duplicate keys. + # However, the CloudServers API is not definitive on this front, + # and we want to be compatible. + metadata = [] + if env['server']['metadata']: + for k, v in env['server']['metadata'].items(): + metadata.append({'key': k, 'value': v}) + instances = self.compute_api.create( - req.environ['nova.context'], + context, instance_types.get_by_flavor_id(env['server']['flavorId']), image_id, kernel_id=kernel_id, @@ -181,6 +191,7 @@ class Controller(wsgi.Controller): display_description=env['server']['name'], key_name=key_pair['name'], key_data=key_pair['public_key'], + metadata=metadata, onset_files=env.get('onset_files', [])) return _translate_keys(instances[0]) diff --git a/nova/compute/api.py b/nova/compute/api.py index 0caadc32e..d9431c679 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -85,7 +85,7 @@ class API(base.Base): min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, + availability_zone=None, user_data=None, metadata=[], onset_files=None): """Create the number of instances requested if quota and other arguments check out ok. 
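Note on the metadata handling: the servers controller above converts the request's metadata dict into a list of {'key': ..., 'value': ...} items (duplicate keys are allowed for now), and the compute API hunk below enforces a per-instance item quota plus a 255-character limit on keys and values. The following is a minimal, self-contained sketch of those checks only; the constant limits and the check_metadata/to_metadata_list helpers are illustrative stand-ins for quota.allowed_metadata_items and the request context, not code from this patch.

# Hypothetical sketch of the metadata conversion and limit checks.
MAX_METADATA_ITEMS = 128   # mirrors the new quota_metadata_items flag default
MAX_LENGTH = 255           # matches the instance_metadata column sizes

class MetadataLimitExceeded(Exception):
    pass

def to_metadata_list(metadata_dict):
    # API-level dict -> list-of-pairs form that is stored in the database
    return [{'key': k, 'value': v} for k, v in metadata_dict.items()]

def check_metadata(metadata):
    if len(metadata) > MAX_METADATA_ITEMS:
        raise MetadataLimitExceeded("too many metadata items")
    for item in metadata:
        if len(item['key']) > MAX_LENGTH or len(item['value']) > MAX_LENGTH:
            raise MetadataLimitExceeded("metadata key or value too long")

metadata = to_metadata_list({'hello': 'world', 'open': 'stack'})
check_metadata(metadata)   # passes; 129 items or a 256-char value would raise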
@@ -100,6 +100,30 @@ class API(base.Base): "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") + num_metadata = len(metadata) + quota_metadata = quota.allowed_metadata_items(context, num_metadata) + if quota_metadata < num_metadata: + pid = context.project_id + msg = (_("Quota exceeeded for %(pid)s," + " tried to set %(num_metadata)s metadata properties") + % locals()) + LOG.warn(msg) + raise quota.QuotaError(msg, "MetadataLimitExceeded") + + # Because metadata is stored in the DB, we hard-code the size limits + # In future, we may support more variable length strings, so we act + # as if this is quota-controlled for forwards compatibility + for metadata_item in metadata: + k = metadata_item['key'] + v = metadata_item['value'] + if len(k) > 255 or len(v) > 255: + pid = context.project_id + msg = (_("Quota exceeeded for %(pid)s," + " metadata property key or value too long") + % locals()) + LOG.warn(msg) + raise quota.QuotaError(msg, "MetadataLimitExceeded") + image = self.image_service.show(context, image_id) if kernel_id is None: kernel_id = image.get('kernel_id', None) @@ -154,6 +178,7 @@ class API(base.Base): 'key_name': key_name, 'key_data': key_data, 'locked': False, + 'metadata': metadata, 'availability_zone': availability_zone} elevated = context.elevated() instances = [] diff --git a/nova/db/api.py b/nova/db/api.py index d7f3746d2..0a010e727 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -85,8 +85,8 @@ def service_get(context, service_id): def service_get_all(context, disabled=False): - """Get all service.""" - return IMPL.service_get_all(context, None, disabled) + """Get all services.""" + return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2ab402e1c..d8751bef4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -136,15 +136,12 @@ def service_get(context, service_id, session=None): @require_admin_context -def service_get_all(context, session=None, disabled=False): - if not session: - session = get_session() - - result = session.query(models.Service).\ +def service_get_all(context, disabled=False): + session = get_session() + return session.query(models.Service).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(disabled=disabled).\ all() - return result @require_admin_context @@ -715,6 +712,7 @@ def instance_get(context, instance_id, session=None): options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -723,6 +721,7 @@ def instance_get(context, instance_id, session=None): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ + options(joinedload('metadata')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py new file mode 100644 index 000000000..4cb07e0d8 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py @@ -0,0 +1,78 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +quotas = Table('quotas', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# + +instance_metadata_table = Table('instance_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + +# +# New columns +# +quota_metadata_items = Column('metadata_items', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (instance_metadata_table, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + quotas.create_column(quota_metadata_items) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py new file mode 100644 index 000000000..705fc8ff3 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
+# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# +# None + +# +# Tables to alter +# +# None + +# +# Columns to add to existing tables +# + +volumes_provider_location = Column('provider_location', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + +volumes_provider_auth = Column('provider_auth', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(volumes_provider_location) + volumes.create_column(volumes_provider_auth) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 40a96fc17..1882efeba 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -243,6 +243,9 @@ class Volume(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + provider_location = Column(String(255)) + provider_auth = Column(String(255)) + class Quota(BASE, NovaBase): """Represents quota overrides for a project.""" @@ -256,6 +259,7 @@ class Quota(BASE, NovaBase): volumes = Column(Integer) gigabytes = Column(Integer) floating_ips = Column(Integer) + metadata_items = Column(Integer) class ExportDevice(BASE, NovaBase): @@ -536,6 +540,20 @@ class Console(BASE, NovaBase): pool = relationship(ConsolePool, backref=backref('consoles')) +class InstanceMetadata(BASE, NovaBase): + """Represents a metadata key/value pair for an instance""" + __tablename__ = 'instance_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + instance = relationship(Instance, backref="metadata", + foreign_keys=instance_id, + primaryjoin='and_(' + 'InstanceMetadata.instance_id == Instance.id,' + 'InstanceMetadata.deleted == False)') + + class Zone(BASE, NovaBase): """Represents a child zone of this zone.""" __tablename__ = 'zones' @@ -557,7 +575,8 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console, Zone) + Project, Certificate, ConsolePool, Console, Zone, + InstanceMetadata) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/flags.py b/nova/flags.py index f64a62da9..24bca0caf 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -160,9 +160,45 @@ class StrWrapper(object): raise KeyError(name) -FLAGS = FlagValues() -gflags.FLAGS = FLAGS -gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS) +# Copied from gflags with small mods to get the naming correct. +# Originally gflags checks for the first module that is not gflags that is +# in the call chain, we want to check for the first module that is not gflags +# and not this module. +def _GetCallingModule(): + """Returns the name of the module that's calling into this module. + + We generally use this function to get the name of the module calling a + DEFINE_foo... function. + """ + # Walk down the stack to find the first globals dict that's not ours. 
+ for depth in range(1, sys.getrecursionlimit()): + if not sys._getframe(depth).f_globals is globals(): + module_name = __GetModuleName(sys._getframe(depth).f_globals) + if module_name == 'gflags': + continue + if module_name is not None: + return module_name + raise AssertionError("No module was found") + + +# Copied from gflags because it is a private function +def __GetModuleName(globals_dict): + """Given a globals dict, returns the name of the module that defines it. + + Args: + globals_dict: A dictionary that should correspond to an environment + providing the values of the globals. + + Returns: + A string (the name of the module) or None (if the module could not + be identified. + """ + for name, module in sys.modules.iteritems(): + if getattr(module, '__dict__', None) is globals_dict: + if name == '__main__': + return sys.argv[0] + return name + return None def _wrapper(func): @@ -173,6 +209,11 @@ def _wrapper(func): return _wrapped +FLAGS = FlagValues() +gflags.FLAGS = FLAGS +gflags._GetCallingModule = _GetCallingModule + + DEFINE = _wrapper(gflags.DEFINE) DEFINE_string = _wrapper(gflags.DEFINE_string) DEFINE_integer = _wrapper(gflags.DEFINE_integer) @@ -185,8 +226,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) DEFINE_flag = _wrapper(gflags.DEFINE_flag) - - HelpFlag = gflags.HelpFlag HelpshortFlag = gflags.HelpshortFlag HelpXMLFlag = gflags.HelpXMLFlag diff --git a/nova/log.py b/nova/log.py index 10c14d74b..591d26c63 100644 --- a/nova/log.py +++ b/nova/log.py @@ -236,16 +236,17 @@ class NovaRootLogger(NovaLogger): def __init__(self, name, level=NOTSET): self.logpath = None self.filelog = None - self.syslog = SysLogHandler(address='/dev/log') self.streamlog = StreamHandler() + self.syslog = None NovaLogger.__init__(self, name, level) def setup_from_flags(self): """Setup logger from flags""" global _filelog if FLAGS.use_syslog: + self.syslog = SysLogHandler(address='/dev/log') self.addHandler(self.syslog) - else: + elif self.syslog: self.removeHandler(self.syslog) logpath = _get_log_file_path() if logpath: diff --git a/nova/network/manager.py b/nova/network/manager.py index c6eba225e..12a0c5018 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -322,6 +322,16 @@ class FlatManager(NetworkManager): """ timeout_fixed_ips = False + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. 
+ """ + #Fix for bug 723298 - do not call init_host on superclass + #Following code has been copied for NetworkManager.init_host + ctxt = context.get_admin_context() + for network in self.db.host_get_networks(ctxt, self.host): + self._on_set_network_host(ctxt, network['id']) + def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Gets a fixed ip from the pool.""" # TODO(vish): when this is called by compute, we can associate compute @@ -359,6 +369,7 @@ class FlatManager(NetworkManager): project_net = IPy.IP(cidr) net = {} net['bridge'] = FLAGS.flat_network_bridge + net['dns'] = FLAGS.flat_network_dns net['cidr'] = cidr net['netmask'] = str(project_net.netmask()) net['gateway'] = str(project_net[1]) @@ -406,6 +417,22 @@ class FlatManager(NetworkManager): net['dns'] = FLAGS.flat_network_dns self.db.network_update(context, network_id, net) + def allocate_floating_ip(self, context, project_id): + #Fix for bug 723298 + raise NotImplementedError() + + def associate_floating_ip(self, context, floating_address, fixed_address): + #Fix for bug 723298 + raise NotImplementedError() + + def disassociate_floating_ip(self, context, floating_address): + #Fix for bug 723298 + raise NotImplementedError() + + def deallocate_floating_ip(self, context, floating_address): + #Fix for bug 723298 + raise NotImplementedError() + class FlatDHCPManager(FlatManager): """Flat networking with dhcp. diff --git a/nova/quota.py b/nova/quota.py index 3884eb308..6b52a97fa 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -35,6 +35,8 @@ flags.DEFINE_integer('quota_gigabytes', 1000, 'number of volume gigabytes allowed per project') flags.DEFINE_integer('quota_floating_ips', 10, 'number of floating ips allowed per project') +flags.DEFINE_integer('quota_metadata_items', 128, + 'number of metadata items allowed per instance') def get_quota(context, project_id): @@ -42,7 +44,8 @@ def get_quota(context, project_id): 'cores': FLAGS.quota_cores, 'volumes': FLAGS.quota_volumes, 'gigabytes': FLAGS.quota_gigabytes, - 'floating_ips': FLAGS.quota_floating_ips} + 'floating_ips': FLAGS.quota_floating_ips, + 'metadata_items': FLAGS.quota_metadata_items} try: quota = db.quota_get(context, project_id) for key in rval.keys(): @@ -94,6 +97,15 @@ def allowed_floating_ips(context, num_floating_ips): return min(num_floating_ips, allowed_floating_ips) +def allowed_metadata_items(context, num_metadata_items): + """Check quota; return min(num_metadata_items,allowed_metadata_items)""" + project_id = context.project_id + context = context.elevated() + quota = get_quota(context, project_id) + num_allowed_metadata_items = quota['metadata_items'] + return min(num_metadata_items, num_allowed_metadata_items) + + class QuotaError(exception.ApiError): """Quota Exceeeded""" pass diff --git a/nova/service.py b/nova/service.py index cc88ac233..f47358089 100644 --- a/nova/service.py +++ b/nova/service.py @@ -45,15 +45,10 @@ FLAGS = flags.FLAGS flags.DEFINE_integer('report_interval', 10, 'seconds between nodes reporting state to datastore', lower_bound=1) - flags.DEFINE_integer('periodic_interval', 60, 'seconds between running periodic tasks', lower_bound=1) -flags.DEFINE_flag(flags.HelpFlag()) -flags.DEFINE_flag(flags.HelpshortFlag()) -flags.DEFINE_flag(flags.HelpXMLFlag()) - class Service(object): """Base class for workers that run on hosts.""" @@ -64,6 +59,8 @@ class Service(object): self.binary = binary self.topic = topic self.manager_class_name = manager + manager_class = utils.import_class(self.manager_class_name) + self.manager = 
manager_class(host=self.host, *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval super(Service, self).__init__(*args, **kwargs) @@ -71,9 +68,9 @@ class Service(object): self.timers = [] def start(self): - manager_class = utils.import_class(self.manager_class_name) - self.manager = manager_class(host=self.host, *self.saved_args, - **self.saved_kwargs) + vcs_string = version.version_string_with_vcs() + logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"), + {'topic': self.topic, 'vcs_string': vcs_string}) self.manager.init_host() self.model_disconnected = False ctxt = context.get_admin_context() @@ -153,9 +150,6 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - vcs_string = version.version_string_with_vcs() - logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)") - % locals()) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -217,8 +211,19 @@ class Service(object): def serve(*services): - if not services: - services = [Service.create()] + try: + if not services: + services = [Service.create()] + except Exception: + logging.exception('in Service.create()') + raise + finally: + # After we've loaded up all our dynamic bits, check + # whether we should print help + flags.DEFINE_flag(flags.HelpFlag()) + flags.DEFINE_flag(flags.HelpshortFlag()) + flags.DEFINE_flag(flags.HelpXMLFlag()) + FLAGS.ParseNewFlags() name = '_'.join(x.binary for x in services) logging.debug(_("Serving %s"), name) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index e0b7b8029..fb282f1c9 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -221,8 +221,7 @@ class FakeAuthDatabase(object): class FakeAuthManager(object): auth_data = {} - def add_user(self, user): - key = user.id + def add_user(self, key, user): FakeAuthManager.auth_data[key] = user def get_user(self, uid): @@ -235,10 +234,7 @@ class FakeAuthManager(object): return None def get_user_from_access_key(self, key): - for k, v in FakeAuthManager.auth_data.iteritems(): - if v.access == key: - return v - return None + return FakeAuthManager.auth_data.get(key, None) class FakeRateLimiter(object): diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index eab78b50c..0dd65d321 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -48,7 +48,7 @@ class Test(unittest.TestCase): def test_authorize_user(self): f = fakes.FakeAuthManager() - f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) + f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' @@ -62,7 +62,7 @@ class Test(unittest.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) + f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' @@ -144,7 +144,7 @@ class TestLimiter(unittest.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) + f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' diff --git 
a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 589f3d3eb..054996658 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -28,6 +28,7 @@ import nova.api.openstack from nova.api.openstack import servers import nova.db.api from nova.db.sqlalchemy.models import Instance +from nova.db.sqlalchemy.models import InstanceMetadata import nova.rpc from nova.tests.api.openstack import fakes @@ -64,6 +65,9 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1, private_address=None, public_addresses=None): + metadata = [] + metadata.append(InstanceMetadata(key='seq', value=id)) + if public_addresses == None: public_addresses = list() @@ -95,7 +99,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None): "availability_zone": "", "display_name": "server%s" % id, "display_description": "", - "locked": False} + "locked": False, + "metadata": metadata} instance["fixed_ip"] = { "address": private_address, @@ -214,7 +219,8 @@ class ServersTest(unittest.TestCase): "get_image_id_from_image_hash", image_id_from_hash) body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, + name='server_test', imageId=2, flavorId=2, + metadata={'hello': 'world', 'open': 'stack'}, personality={})) req = webob.Request.blank('/v1.0/servers') req.method = 'POST' @@ -291,6 +297,7 @@ class ServersTest(unittest.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['name'], 'server%d' % i) self.assertEqual(s['imageId'], 10) + self.assertEqual(s['metadata']['seq'], i) i += 1 def test_server_pause(self): diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index cfa65c137..2b1919407 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -39,6 +39,5 @@ FLAGS.num_shelves = 2 FLAGS.blades_per_shelf = 4 FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True -FLAGS.sql_connection = 'sqlite:///nova.sqlite' +FLAGS.sql_connection = 'sqlite:///tests.sqlite' FLAGS.use_ipv6 = True -FLAGS.logfile = 'tests.log' diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 2c6dc5973..061910013 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -136,6 +136,22 @@ class CloudTestCase(test.TestCase): db.instance_destroy(self.context, inst['id']) db.floating_ip_destroy(self.context, address) + def test_describe_security_groups(self): + """Makes sure describe_security_groups works and filters results.""" + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + result = self.cloud.describe_security_groups(self.context) + # NOTE(vish): should have the default group as well + self.assertEqual(len(result['securityGroupInfo']), 2) + result = self.cloud.describe_security_groups(self.context, + group_name=[sec['name']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + sec['name']) + db.security_group_destroy(self.context, sec['id']) + def test_describe_volumes(self): """Makes sure describe_volumes works and filters results.""" vol1 = db.volume_create(self.context, {}) @@ -294,19 +310,6 @@ class CloudTestCase(test.TestCase): LOG.debug(_("Terminating instance %s"), instance_id) rv = self.compute.terminate_instance(instance_id) - def test_describe_instances(self): - """Makes sure describe_instances works.""" - instance1 = db.instance_create(self.context, {'host': 'host2'}) - comp1 = db.service_create(self.context, 
{'host': 'host2', - 'availability_zone': 'zone1', - 'topic': "compute"}) - result = self.cloud.describe_instances(self.context) - self.assertEqual(result['reservationSet'][0] - ['instancesSet'][0] - ['placement']['availabilityZone'], 'zone1') - db.instance_destroy(self.context, instance1['id']) - db.service_destroy(self.context, comp1['id']) - @staticmethod def _fake_set_image_description(ctxt, image_id, description): from nova.objectstore import handler diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 9548a8c13..1e42fddf3 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -16,6 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import compute from nova import context from nova import db from nova import flags @@ -87,6 +88,18 @@ class QuotaTestCase(test.TestCase): num_instances = quota.allowed_instances(self.context, 100, instance_types.INSTANCE_TYPES['m1.small']) self.assertEqual(num_instances, 10) + + # metadata_items + too_many_items = FLAGS.quota_metadata_items + 1000 + num_metadata_items = quota.allowed_metadata_items(self.context, + too_many_items) + self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items) + db.quota_update(self.context, self.project.id, {'metadata_items': 5}) + num_metadata_items = quota.allowed_metadata_items(self.context, + too_many_items) + self.assertEqual(num_metadata_items, 5) + + # Cleanup db.quota_destroy(self.context, self.project.id) def test_too_many_instances(self): @@ -151,3 +164,15 @@ class QuotaTestCase(test.TestCase): self.assertRaises(quota.QuotaError, self.cloud.allocate_address, self.context) db.floating_ip_destroy(context.get_admin_context(), address) + + def test_too_many_metadata_items(self): + metadata = {} + for i in range(FLAGS.quota_metadata_items + 1): + metadata['key%s' % i] = 'value%s' % i + self.assertRaises(quota.QuotaError, compute.API().create, + self.context, + min_count=1, + max_count=1, + instance_type='m1.small', + image_id='fake', + metadata=metadata) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index a67c8d1e8..45d9afa6c 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -50,13 +50,6 @@ class ExtendedService(service.Service): class ServiceManagerTestCase(test.TestCase): """Test cases for Services""" - def test_attribute_error_for_no_manager(self): - serv = service.Service('test', - 'test', - 'test', - 'nova.tests.test_service.FakeManager') - self.assertRaises(AttributeError, getattr, serv, 'test_method') - def test_message_gets_to_manager(self): serv = service.Service('test', 'test', diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py new file mode 100644 index 000000000..34a407f1a --- /dev/null +++ b/nova/tests/test_utils.py @@ -0,0 +1,174 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import test +from nova import utils +from nova import exception + + +class GetFromPathTestCase(test.TestCase): + def test_tolerates_nones(self): + f = utils.get_from_path + + input = [] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [None] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': None}] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': None}}] + self.assertEquals([{'b': None}], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': None}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] + self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + def test_does_select(self): + f = utils.get_from_path + + input = [{'a': 'a_1'}] + self.assertEquals(['a_1'], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': 'b_1'}}] + self.assertEquals([{'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': None}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': {'c': 'c_2'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], + f(input, "a")) + self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b")) + self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c")) + + self.assertEquals([], f(input, "a/b/c/d")) + self.assertEquals([], f(input, "c/a/b/d")) + self.assertEquals([], f(input, "i/r/t")) + + def test_flattens_lists(self): + f = utils.get_from_path + + input = [{'a': [1, 2, 3]}] + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}] + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], 
f(input, "a/b/c")) + + input = [{'a': [1, 2, {'b': 'b_1'}]}] + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + + def test_bad_xpath(self): + f = utils.get_from_path + + self.assertRaises(exception.Error, f, [], None) + self.assertRaises(exception.Error, f, [], "") + self.assertRaises(exception.Error, f, [], "/") + self.assertRaises(exception.Error, f, [], "/a") + self.assertRaises(exception.Error, f, [], "/a/") + self.assertRaises(exception.Error, f, [], "//") + self.assertRaises(exception.Error, f, [], "//a") + self.assertRaises(exception.Error, f, [], "a//a") + self.assertRaises(exception.Error, f, [], "a//a/") + self.assertRaises(exception.Error, f, [], "a/a/") + + def test_real_failure1(self): + # Real world failure case... + # We weren't coping when the input was a Dictionary instead of a List + # This led to test_accepts_dictionaries + f = utils.get_from_path + + inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], + 'address': '192.168.0.3'}, + 'hostname': ''} + + private_ips = f(inst, 'fixed_ip/address') + public_ips = f(inst, 'fixed_ip/floating_ips/address') + self.assertEquals(['192.168.0.3'], private_ips) + self.assertEquals(['1.2.3.4'], public_ips) + + def test_accepts_dictionaries(self): + f = utils.get_from_path + + input = {'a': [1, 2, 3]} + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': {'b': [1, 2, 3]}} + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [1, 2, {'b': 'b_1'}]} + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) diff --git a/nova/utils.py b/nova/utils.py index 2a3acf042..0cf91e0cc 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -31,6 +32,7 @@ import string import struct import sys import time +import types from xml.sax import saxutils import re import netaddr @@ -499,3 +501,52 @@ def ensure_b64_encoding(val): return val except TypeError: return base64.b64encode(val) + + +def get_from_path(items, path): + """ Returns a list of items matching the specified path. Takes an + XPath-like expression e.g. prop1/prop2/prop3, and for each item in items, + looks up items[prop1][prop2][prop3]. Like XPath, if any of the + intermediate results are lists it will treat each list item individually. + A 'None' in items or any child expressions will be ignored, this function + will not throw because of None (anywhere) in items. 
The returned list + will contain no None values.""" + + if path is None: + raise exception.Error("Invalid mini_xpath") + + (first_token, sep, remainder) = path.partition("/") + + if first_token == "": + raise exception.Error("Invalid mini_xpath") + + results = [] + + if items is None: + return results + + if not isinstance(items, types.ListType): + # Wrap single objects in a list + items = [items] + + for item in items: + if item is None: + continue + get_method = getattr(item, "get", None) + if get_method is None: + continue + child = get_method(first_token) + if child is None: + continue + if isinstance(child, types.ListType): + # Flatten intermediate lists + for x in child: + results.append(x) + else: + results.append(child) + + if not sep: + # No more tokens + return results + else: + return get_from_path(results, remainder) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 82f4c2f54..e3744c790 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -21,6 +21,7 @@ Drivers for volumes. """ import time +import os from nova import exception from nova import flags @@ -36,6 +37,8 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') flags.DEFINE_string('num_shell_tries', 3, 'number of times to attempt to run flakey shell commands') +flags.DEFINE_string('num_iscsi_scan_tries', 3, + 'number of times to rescan iSCSI target to find volume') flags.DEFINE_integer('num_shelves', 100, 'Number of vblade shelves') @@ -88,7 +91,8 @@ class VolumeDriver(object): % FLAGS.volume_group) def create_volume(self, volume): - """Creates a logical volume.""" + """Creates a logical volume. Can optionally return a Dictionary of + changes to the volume object to be persisted.""" if int(volume['size']) == 0: sizestr = '100M' else: @@ -123,7 +127,8 @@ class VolumeDriver(object): raise NotImplementedError() def create_export(self, context, volume): - """Exports the volume.""" + """Exports the volume. Can optionally return a Dictionary of changes + to the volume object to be persisted.""" raise NotImplementedError() def remove_export(self, context, volume): @@ -222,7 +227,18 @@ class FakeAOEDriver(AOEDriver): class ISCSIDriver(VolumeDriver): - """Executes commands relating to ISCSI volumes.""" + """Executes commands relating to ISCSI volumes. + + We make use of model provider properties as follows: + + :provider_location: if present, contains the iSCSI target information + in the same format as an ietadm discovery + i.e. '<ip>:<port>,<portal> <target IQN>' + + :provider_auth: if present, contains a space-separated triple: + '<auth method> <auth username> <auth password>'. + `CHAP` is the only auth_method in use at the moment. + """ def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" @@ -294,40 +310,149 @@ class ISCSIDriver(VolumeDriver): self._execute("sudo ietadm --op delete --tid=%s" % iscsi_target) - def _get_name_and_portal(self, volume): - """Gets iscsi name and portal from volume name and host.""" + def _do_iscsi_discovery(self, volume): + #TODO(justinsb): Deprecate discovery and use stored info + #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) 
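Usage note: the new utils.get_from_path helper above replaces the try/except address lookups in the OpenStack servers view. A short usage sketch, modelled on the test_real_failure1 case in nova/tests/test_utils.py and assuming only the helper added by this patch:

from nova import utils

inst = {'fixed_ip': {'address': '192.168.0.3',
                     'floating_ips': [{'address': '1.2.3.4'}]},
        'hostname': ''}

# Each path component indexes into dicts; intermediate lists are flattened
# and None values are skipped, so the result is always a flat list.
private_ips = utils.get_from_path(inst, 'fixed_ip/address')              # ['192.168.0.3']
public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')  # ['1.2.3.4']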
+ LOG.warn(_("ISCSI provider_location not stored, using discovery")) + volume_name = volume['name'] - host = volume['host'] + (out, _err) = self._execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % host) + "sendtargets -p %s" % (volume['host'])) for target in out.splitlines(): if FLAGS.iscsi_ip_prefix in target and volume_name in target: - (location, _sep, iscsi_name) = target.partition(" ") - break - iscsi_portal = location.split(",")[0] - return (iscsi_name, iscsi_portal) + return target + return None + + def _get_iscsi_properties(self, volume): + """Gets iscsi configuration + + We ideally get saved information in the volume entity, but fall back + to discovery if need be. Discovery may be completely removed in future + The properties are: + + :target_discovered: boolean indicating whether discovery was used + + :target_iqn: the IQN of the iSCSI target + + :target_portal: the portal of the iSCSI target + + :auth_method:, :auth_username:, :auth_password: + + the authentication details. Right now, either auth_method is not + present meaning no authentication, or auth_method == `CHAP` + meaning use CHAP with the specified credentials. + """ + + properties = {} + + location = volume['provider_location'] + + if location: + # provider_location is the same format as iSCSI discovery output + properties['target_discovered'] = False + else: + location = self._do_iscsi_discovery(volume) + + if not location: + raise exception.Error(_("Could not find iSCSI export " + " for volume %s") % + (volume['name'])) + + LOG.debug(_("ISCSI Discovery: Found %s") % (location)) + properties['target_discovered'] = True + + (iscsi_target, _sep, iscsi_name) = location.partition(" ") + + iscsi_portal = iscsi_target.split(",")[0] + + properties['target_iqn'] = iscsi_name + properties['target_portal'] = iscsi_portal + + auth = volume['provider_auth'] + + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return properties + + def _run_iscsiadm(self, iscsi_properties, iscsi_command): + command = ("sudo iscsiadm -m node -T %s -p %s %s" % + (iscsi_properties['target_iqn'], + iscsi_properties['target_portal'], + iscsi_command)) + (out, err) = self._execute(command) + LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % + (iscsi_command, out, err)) + return (out, err) + + def _iscsiadm_update(self, iscsi_properties, property_key, property_value): + iscsi_command = ("--op update -n %s -v %s" % + (property_key, property_value)) + return self._run_iscsiadm(iscsi_properties, iscsi_command) def discover_volume(self, volume): """Discover volume on a remote host.""" - iscsi_name, iscsi_portal = self._get_name_and_portal(volume) - self._execute("sudo iscsiadm -m node -T %s -p %s --login" % - (iscsi_name, iscsi_portal)) - self._execute("sudo iscsiadm -m node -T %s -p %s --op update " - "-n node.startup -v automatic" % - (iscsi_name, iscsi_portal)) - return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal, - iscsi_name) + iscsi_properties = self._get_iscsi_properties(volume) + + if not iscsi_properties['target_discovered']: + self._run_iscsiadm(iscsi_properties, "--op new") + + if iscsi_properties.get('auth_method'): + self._iscsiadm_update(iscsi_properties, + "node.session.auth.authmethod", + iscsi_properties['auth_method']) + self._iscsiadm_update(iscsi_properties, + "node.session.auth.username", + iscsi_properties['auth_username']) + 
self._iscsiadm_update(iscsi_properties, + "node.session.auth.password", + iscsi_properties['auth_password']) + + self._run_iscsiadm(iscsi_properties, "--login") + + self._iscsiadm_update(iscsi_properties, "node.startup", "automatic") + + mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % + (iscsi_properties['target_portal'], + iscsi_properties['target_iqn'])) + + # The /dev/disk/by-path/... node is not always present immediately + # TODO(justinsb): This retry-with-delay is a pattern, move to utils? + tries = 0 + while not os.path.exists(mount_device): + if tries >= FLAGS.num_iscsi_scan_tries: + raise exception.Error(_("iSCSI device not found at %s") % + (mount_device)) + + LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. " + "Will rescan & retry. Try number: %(tries)s") % + locals()) + + # The rescan isn't documented as being necessary(?), but it helps + self._run_iscsiadm(iscsi_properties, "--rescan") + + tries = tries + 1 + if not os.path.exists(mount_device): + time.sleep(tries ** 2) + + if tries != 0: + LOG.debug(_("Found iSCSI node %(mount_device)s " + "(after %(tries)s rescans)") % + locals()) + + return mount_device def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" - iscsi_name, iscsi_portal = self._get_name_and_portal(volume) - self._execute("sudo iscsiadm -m node -T %s -p %s --op update " - "-n node.startup -v manual" % - (iscsi_name, iscsi_portal)) - self._execute("sudo iscsiadm -m node -T %s -p %s --logout " % - (iscsi_name, iscsi_portal)) - self._execute("sudo iscsiadm -m node --op delete " - "--targetname %s" % iscsi_name) + iscsi_properties = self._get_iscsi_properties(volume) + self._iscsiadm_update(iscsi_properties, "node.startup", "manual") + self._run_iscsiadm(iscsi_properties, "--logout") + self._run_iscsiadm(iscsi_properties, "--op delete") class FakeISCSIDriver(ISCSIDriver): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index d2f02e4e0..3e8bc16b3 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -107,10 +107,14 @@ class VolumeManager(manager.Manager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - self.driver.create_volume(volume_ref) + model_update = self.driver.create_volume(volume_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) LOG.debug(_("volume %s: creating export"), volume_ref['name']) - self.driver.create_export(context, volume_ref) + model_update = self.driver.create_export(context, volume_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) diff --git a/nova/volume/san.py b/nova/volume/san.py index 26d6125e7..9532c8116 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -16,13 +16,16 @@ # under the License. """ Drivers for san-stored volumes. + The unique thing about a SAN is that we don't expect that we can run the volume - controller on the SAN hardware. We expect to access it over SSH or some API. +controller on the SAN hardware. We expect to access it over SSH or some API. 
""" import os import paramiko +from xml.etree import ElementTree + from nova import exception from nova import flags from nova import log as logging @@ -41,37 +44,19 @@ flags.DEFINE_string('san_password', '', 'Password for SAN controller') flags.DEFINE_string('san_privatekey', '', 'Filename of private key to use for SSH authentication') +flags.DEFINE_string('san_clustername', '', + 'Cluster name to use for creating volumes') +flags.DEFINE_integer('san_ssh_port', 22, + 'SSH port to use with SAN') class SanISCSIDriver(ISCSIDriver): """ Base class for SAN-style storage volumes - (storage providers we access over SSH)""" - #Override because SAN ip != host ip - def _get_name_and_portal(self, volume): - """Gets iscsi name and portal from volume name and host.""" - volume_name = volume['name'] - - # TODO(justinsb): store in volume, remerge with generic iSCSI code - host = FLAGS.san_ip - - (out, _err) = self._execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % host) - - location = None - find_iscsi_name = self._build_iscsi_target_name(volume) - for target in out.splitlines(): - if find_iscsi_name in target: - (location, _sep, iscsi_name) = target.partition(" ") - break - if not location: - raise exception.Error(_("Could not find iSCSI export " - " for volume %s") % - volume_name) - - iscsi_portal = location.split(",")[0] - LOG.debug("iscsi_name=%s, iscsi_portal=%s" % - (iscsi_name, iscsi_portal)) - return (iscsi_name, iscsi_portal) + + A SAN-style storage value is 'different' because the volume controller + probably won't run on it, so we need to access is over SSH or another + remote protocol. + """ def _build_iscsi_target_name(self, volume): return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) @@ -85,6 +70,7 @@ class SanISCSIDriver(ISCSIDriver): ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if FLAGS.san_password: ssh.connect(FLAGS.san_ip, + port=FLAGS.san_ssh_port, username=FLAGS.san_login, password=FLAGS.san_password) elif FLAGS.san_privatekey: @@ -92,10 +78,11 @@ class SanISCSIDriver(ISCSIDriver): # It sucks that paramiko doesn't support DSA keys privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) ssh.connect(FLAGS.san_ip, + port=FLAGS.san_ssh_port, username=FLAGS.san_login, pkey=privatekey) else: - raise exception.Error("Specify san_password or san_privatekey") + raise exception.Error(_("Specify san_password or san_privatekey")) return ssh def _run_ssh(self, command, check_exit_code=True): @@ -124,10 +111,10 @@ class SanISCSIDriver(ISCSIDriver): def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" if not (FLAGS.san_password or FLAGS.san_privatekey): - raise exception.Error("Specify san_password or san_privatekey") + raise exception.Error(_("Specify san_password or san_privatekey")) if not (FLAGS.san_ip): - raise exception.Error("san_ip must be set") + raise exception.Error(_("san_ip must be set")) def _collect_lines(data): @@ -155,17 +142,27 @@ def _get_prefixed_values(data, prefix): class SolarisISCSIDriver(SanISCSIDriver): """Executes commands relating to Solaris-hosted ISCSI volumes. + Basic setup for a Solaris iSCSI server: + pkg install storage-server SUNWiscsit + svcadm enable stmf + svcadm enable -r svc:/network/iscsi/target:default + pfexec itadm create-tpg e1000g0 ${MYIP} + pfexec itadm create-target -t e1000g0 + Then grant the user that will be logging on lots of permissions. 
I'm not sure exactly which though: + zfs allow justinsb create,mount,destroy rpool + usermod -P'File System Management' justinsb + usermod -P'Primary Administrator' justinsb Also make sure you can login using san_login & san_password/san_privatekey @@ -306,6 +303,17 @@ class SolarisISCSIDriver(SanISCSIDriver): self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" % (target_group_name, luid)) + #TODO(justinsb): Is this always 1? Does it matter? + iscsi_portal_interface = '1' + iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface + + db_update = {} + db_update['provider_location'] = ("%s %s" % + (iscsi_portal, + iscsi_name)) + + return db_update + def remove_export(self, context, volume): """Removes an export for a logical volume.""" @@ -333,3 +341,245 @@ class SolarisISCSIDriver(SanISCSIDriver): if self._is_lu_created(volume): self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" % (luid)) + + +class HpSanISCSIDriver(SanISCSIDriver): + """Executes commands relating to HP/Lefthand SAN ISCSI volumes. + + We use the CLIQ interface, over SSH. + + Rough overview of CLIQ commands used: + + :createVolume: (creates the volume) + + :getVolumeInfo: (to discover the IQN etc) + + :getClusterInfo: (to discover the iSCSI target IP address) + + :assignVolumeChap: (exports it with CHAP security) + + The 'trick' here is that the HP SAN enforces security by default, so + normally a volume mount would need both to configure the SAN in the volume + layer and do the mount on the compute layer. Multi-layer operations are + not catered for at the moment in the nova architecture, so instead we + share the volume using CHAP at volume creation time. Then the mount need + only use those CHAP credentials, so can take place exclusively in the + compute layer. + """ + + def _cliq_run(self, verb, cliq_args): + """Runs a CLIQ command over SSH, without doing any result parsing""" + cliq_arg_strings = [] + for k, v in cliq_args.items(): + cliq_arg_strings.append(" %s=%s" % (k, v)) + cmd = verb + ''.join(cliq_arg_strings) + + return self._run_ssh(cmd) + + def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True): + """Runs a CLIQ command over SSH, parsing and checking the output""" + cliq_args['output'] = 'XML' + (out, _err) = self._cliq_run(verb, cliq_args) + + LOG.debug(_("CLIQ command returned %s"), out) + + result_xml = ElementTree.fromstring(out) + if check_cliq_result: + response_node = result_xml.find("response") + if response_node is None: + msg = (_("Malformed response to CLIQ command " + "%(verb)s %(cliq_args)s. Result=%(out)s") % + locals()) + raise exception.Error(msg) + + result_code = response_node.attrib.get("result") + + if result_code != "0": + msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. 
" + " Result=%(out)s") % + locals()) + raise exception.Error(msg) + + return result_xml + + def _cliq_get_cluster_info(self, cluster_name): + """Queries for info about the cluster (including IP)""" + cliq_args = {} + cliq_args['clusterName'] = cluster_name + cliq_args['searchDepth'] = '1' + cliq_args['verbose'] = '0' + + result_xml = self._cliq_run_xml("getClusterInfo", cliq_args) + + return result_xml + + def _cliq_get_cluster_vip(self, cluster_name): + """Gets the IP on which a cluster shares iSCSI volumes""" + cluster_xml = self._cliq_get_cluster_info(cluster_name) + + vips = [] + for vip in cluster_xml.findall("response/cluster/vip"): + vips.append(vip.attrib.get('ipAddress')) + + if len(vips) == 1: + return vips[0] + + _xml = ElementTree.tostring(cluster_xml) + msg = (_("Unexpected number of virtual ips for cluster " + " %(cluster_name)s. Result=%(_xml)s") % + locals()) + raise exception.Error(msg) + + def _cliq_get_volume_info(self, volume_name): + """Gets the volume info, including IQN""" + cliq_args = {} + cliq_args['volumeName'] = volume_name + result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args) + + # Result looks like this: + #<gauche version="1.0"> + # <response description="Operation succeeded." name="CliqSuccess" + # processingTime="87" result="0"> + # <volume autogrowPages="4" availability="online" blockSize="1024" + # bytesWritten="0" checkSum="false" clusterName="Cluster01" + # created="2011-02-08T19:56:53Z" deleting="false" description="" + # groupName="Group01" initialQuota="536870912" isPrimary="true" + # iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b" + # maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca" + # minReplication="1" name="vol-b" parity="0" replication="2" + # reserveQuota="536870912" scratchQuota="4194304" + # serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316" + # size="1073741824" stridePages="32" thinProvision="true"> + # <status description="OK" value="2"/> + # <permission access="rw" + # authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7" + # chapName="chapusername" chapRequired="true" id="25369" + # initiatorSecret="" iqn="" iscsiEnabled="true" + # loadBalance="true" targetSecret="supersecret"/> + # </volume> + # </response> + #</gauche> + + # Flatten the nodes into a dictionary; use prefixes to avoid collisions + volume_attributes = {} + + volume_node = result_xml.find("response/volume") + for k, v in volume_node.attrib.items(): + volume_attributes["volume." + k] = v + + status_node = volume_node.find("status") + if not status_node is None: + for k, v in status_node.attrib.items(): + volume_attributes["status." + k] = v + + # We only consider the first permission node + permission_node = volume_node.find("permission") + if not permission_node is None: + for k, v in status_node.attrib.items(): + volume_attributes["permission." + k] = v + + LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") % + locals()) + return volume_attributes + + def create_volume(self, volume): + """Creates a volume.""" + cliq_args = {} + cliq_args['clusterName'] = FLAGS.san_clustername + #TODO(justinsb): Should we default to inheriting thinProvision? 
+ cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0' + cliq_args['volumeName'] = volume['name'] + if int(volume['size']) == 0: + cliq_args['size'] = '100MB' + else: + cliq_args['size'] = '%sGB' % volume['size'] + + self._cliq_run_xml("createVolume", cliq_args) + + volume_info = self._cliq_get_volume_info(volume['name']) + cluster_name = volume_info['volume.clusterName'] + iscsi_iqn = volume_info['volume.iscsiIqn'] + + #TODO(justinsb): Is this always 1? Does it matter? + cluster_interface = '1' + + cluster_vip = self._cliq_get_cluster_vip(cluster_name) + iscsi_portal = cluster_vip + ":3260," + cluster_interface + + model_update = {} + model_update['provider_location'] = ("%s %s" % + (iscsi_portal, + iscsi_iqn)) + + return model_update + + def delete_volume(self, volume): + """Deletes a volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['prompt'] = 'false' # Don't confirm + + self._cliq_run_xml("deleteVolume", cliq_args) + + def local_path(self, volume): + # TODO(justinsb): Is this needed here? + raise exception.Error(_("local_path not supported")) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + return self._do_export(context, volume, force_create=False) + + def create_export(self, context, volume): + return self._do_export(context, volume, force_create=True) + + def _do_export(self, context, volume, force_create): + """Supports ensure_export and create_export""" + volume_info = self._cliq_get_volume_info(volume['name']) + + is_shared = 'permission.authGroup' in volume_info + + model_update = {} + + should_export = False + + if force_create or not is_shared: + should_export = True + # Check that we have a project_id + project_id = volume['project_id'] + if not project_id: + project_id = context.project_id + + if project_id: + #TODO(justinsb): Use a real per-project password here + chap_username = 'proj_' + project_id + # HP/Lefthand requires that the password be >= 12 characters + chap_password = 'project_secret_' + project_id + else: + msg = (_("Could not determine project for volume %s, " + "can't export") % + (volume['name'])) + if force_create: + raise exception.Error(msg) + else: + LOG.warn(msg) + should_export = False + + if should_export: + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['chapName'] = chap_username + cliq_args['targetSecret'] = chap_password + + self._cliq_run_xml("assignVolumeChap", cliq_args) + + model_update['provider_auth'] = ("CHAP %s %s" % + (chap_username, chap_password)) + + return model_update + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + + self._cliq_run_xml("unassignVolume", cliq_args) |
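Note on the provider model fields: the drivers above hand connection details back to the volume manager as a model_update dict, where provider_location holds the iSCSI target in ietadm discovery format ('<ip>:<port>,<portal> <target IQN>') and provider_auth holds a space-separated 'CHAP <username> <password>' triple. A minimal sketch of splitting those stored strings back into the properties that ISCSIDriver._get_iscsi_properties builds; the parse_provider helper and the sample IQN are illustrative only, not part of the patch.

def parse_provider(provider_location, provider_auth=None):
    # Split the persisted provider strings into iSCSI connection properties.
    properties = {'target_discovered': False}

    # provider_location e.g. '10.0.0.1:3260,1 iqn.2010-10.org.example:volume-00000001'
    target, _sep, iqn = provider_location.partition(" ")
    properties['target_iqn'] = iqn
    properties['target_portal'] = target.split(",")[0]

    if provider_auth:
        # provider_auth e.g. 'CHAP proj_demo project_secret_demo'
        method, username, secret = provider_auth.split()
        properties['auth_method'] = method
        properties['auth_username'] = username
        properties['auth_password'] = secret

    return properties

props = parse_provider("10.0.0.1:3260,1 iqn.2010-10.org.example:volume-00000001",
                       "CHAP proj_demo project_secret_demo")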
