| field | value | date |
|---|---|---|
| author | Josh Kearney <josh.kearney@rackspace.com> | 2011-02-11 15:18:32 -0600 |
| committer | Josh Kearney <josh.kearney@rackspace.com> | 2011-02-11 15:18:32 -0600 |
| commit | fa4e3af4c8d4161cdb90f0ac54f357e9724cbc22 (patch) | |
| tree | 867e6040e861462a0324490cbb3bb51cd0052e54 /nova | |
| parent | 4a058908db774bfebce4ece814534225e123345c (diff) | |
| parent | c42ace8e605b987e683372efb4913d85ee472a70 (diff) | |
Merged trunk
Diffstat (limited to 'nova')
46 files changed, 920 insertions, 217 deletions
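A recurring pattern in the diff below is the replacement of bare `exception.NotFound` handling with typed exceptions (`InstanceNotFound`, `VolumeNotFound`) that carry the internal database id, which the EC2 `Executor` then translates into user-facing EC2-style identifiers before building the error response. The following is a minimal, self-contained sketch of that pattern, not the merged code itself: the exception shapes match `nova/exception.py` in this diff, while `id_to_ec2_id` and its default `'i-%08x'` template are assumptions standing in for the helper in `nova.api.ec2.cloud`.

```python
# Typed "not found" exceptions that keep the internal id (shapes match this diff).
class NotFound(Exception):
    pass


class InstanceNotFound(NotFound):
    def __init__(self, message, instance_id):
        self.instance_id = instance_id
        super(InstanceNotFound, self).__init__(message)


class VolumeNotFound(NotFound):
    def __init__(self, message, volume_id):
        self.volume_id = volume_id
        super(VolumeNotFound, self).__init__(message)


def id_to_ec2_id(internal_id, template='i-%08x'):
    # Assumed helper: formats an internal integer id as an EC2-style id.
    return template % internal_id


def ec2_error_for(ex):
    """Map an exception to an (error_code, message) pair for the EC2 layer."""
    if isinstance(ex, InstanceNotFound):
        return type(ex).__name__, 'Instance %s not found' % id_to_ec2_id(ex.instance_id)
    if isinstance(ex, VolumeNotFound):
        return type(ex).__name__, 'Volume %s not found' % id_to_ec2_id(ex.volume_id, 'vol-%08x')
    return type(ex).__name__, unicode(ex)


# Usage: a lookup raises the typed exception; the caller maps it to an EC2 error.
try:
    raise InstanceNotFound('Instance 10 not found', 10)
except NotFound as ex:
    print ec2_error_for(ex)  # ('InstanceNotFound', 'Instance i-0000000a not found')
```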
diff --git a/nova/adminclient.py b/nova/adminclient.py index 3cdd8347f..c614b274c 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -21,6 +21,7 @@ Nova User API client library. import base64 import boto +import boto.exception import httplib from boto.ec2.regioninfo import RegionInfo @@ -288,10 +289,14 @@ class NovaAdminClient(object): def get_user(self, name): """Grab a single user by name.""" - user = self.apiconn.get_object('DescribeUser', {'Name': name}, - UserInfo) - if user.username != None: - return user + try: + return self.apiconn.get_object('DescribeUser', + {'Name': name}, + UserInfo) + except boto.exception.BotoServerError, e: + if e.status == 400 and e.error_code == 'NotFound': + return None + raise def has_user(self, username): """Determine if user exists.""" @@ -376,6 +381,13 @@ class NovaAdminClient(object): 'MemberUsers': member_users} return self.apiconn.get_object('RegisterProject', params, ProjectInfo) + def modify_project(self, projectname, manager_user=None, description=None): + """Modifies an existing project.""" + params = {'Name': projectname, + 'ManagerUser': manager_user, + 'Description': description} + return self.apiconn.get_status('ModifyProject', params) + def delete_project(self, projectname): """Permanently deletes the specified project.""" return self.apiconn.get_object('DeregisterProject', diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index fc9a37908..ddcdc673c 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -33,6 +33,7 @@ from nova import log as logging from nova import utils from nova import wsgi from nova.api.ec2 import apirequest +from nova.api.ec2 import cloud from nova.auth import manager @@ -170,7 +171,7 @@ class Authenticate(wsgi.Middleware): req.path) # Be explicit for what exceptions are 403, the rest bubble as 500 except (exception.NotFound, exception.NotAuthorized) as ex: - LOG.audit(_("Authentication Failure: %s"), ex.args[0]) + LOG.audit(_("Authentication Failure: %s"), unicode(ex)) raise webob.exc.HTTPForbidden() # Authenticated! @@ -213,7 +214,8 @@ class Requestify(wsgi.Middleware): LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals()) # Success! 
- api_request = apirequest.APIRequest(self.controller, action, args) + api_request = apirequest.APIRequest(self.controller, action, + req.params['Version'], args) req.environ['ec2.request'] = api_request req.environ['ec2.action_args'] = args return self.application @@ -313,19 +315,32 @@ class Executor(wsgi.Application): result = None try: result = api_request.invoke(context) + except exception.InstanceNotFound as ex: + LOG.info(_('InstanceNotFound raised: %s'), unicode(ex), + context=context) + ec2_id = cloud.id_to_ec2_id(ex.instance_id) + message = _('Instance %s not found') % ec2_id + return self._error(req, context, type(ex).__name__, message) + except exception.VolumeNotFound as ex: + LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), + context=context) + ec2_id = cloud.id_to_ec2_id(ex.volume_id, 'vol-%08x') + message = _('Volume %s not found') % ec2_id + return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: - LOG.info(_('NotFound raised: %s'), ex.args[0], context=context) - return self._error(req, context, type(ex).__name__, ex.args[0]) + LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) + return self._error(req, context, type(ex).__name__, unicode(ex)) except exception.ApiError as ex: - LOG.exception(_('ApiError raised: %s'), ex.args[0], + LOG.exception(_('ApiError raised: %s'), unicode(ex), context=context) if ex.code: - return self._error(req, context, ex.code, ex.args[0]) + return self._error(req, context, ex.code, unicode(ex)) else: - return self._error(req, context, type(ex).__name__, ex.args[0]) + return self._error(req, context, type(ex).__name__, + unicode(ex)) except Exception as ex: extra = {'environment': req.environ} - LOG.exception(_('Unexpected error raised: %s'), ex.args[0], + LOG.exception(_('Unexpected error raised: %s'), unicode(ex), extra=extra, context=context) return self._error(req, context, diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 4c0628e21..5d5093ded 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -195,6 +195,17 @@ class AdminController(object): description=None, member_users=None)) + def modify_project(self, context, name, manager_user, description=None, + **kwargs): + """Modifies a project""" + msg = _("Modify project: %(name)s managed by" + " %(manager_user)s") % locals() + LOG.audit(msg, context=context) + manager.AuthManager().modify_project(name, + manager_user=manager_user, + description=description) + return True + def deregister_project(self, context, name): """Permanently deletes a project.""" LOG.audit(_("Delete project: %s"), name, context=context) diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index d8a2b5f53..7e72d67fb 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -83,9 +83,10 @@ def _try_convert(value): class APIRequest(object): - def __init__(self, controller, action, args): + def __init__(self, controller, action, version, args): self.controller = controller self.action = action + self.version = version self.args = args def invoke(self, context): @@ -132,7 +133,7 @@ class APIRequest(object): response_el = xml.createElement(self.action + 'Response') response_el.setAttribute('xmlns', - 'http://ec2.amazonaws.com/doc/2009-11-30/') + 'http://ec2.amazonaws.com/doc/%s/' % self.version) request_id_el = xml.createElement('requestId') request_id_el.appendChild(xml.createTextNode(request_id)) response_el.appendChild(request_id_el) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 
22b8c19cb..c80e1168a 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -512,8 +512,11 @@ class CloudController(object): def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) - # instance_id is passed in as a list of instances - ec2_id = instance_id[0] + # instance_id may be passed in as a list of instances + if type(instance_id) == list: + ec2_id = instance_id[0] + else: + ec2_id = instance_id instance_id = ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) @@ -532,12 +535,8 @@ class CloudController(object): volumes = [] for ec2_id in volume_id: internal_id = ec2_id_to_id(ec2_id) - try: - volume = self.volume_api.get(context, internal_id) - volumes.append(volume) - except exception.NotFound: - raise exception.NotFound(_("Volume %s not found") - % ec2_id) + volume = self.volume_api.get(context, internal_id) + volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] @@ -668,12 +667,8 @@ class CloudController(object): instances = [] for ec2_id in instance_id: internal_id = ec2_id_to_id(ec2_id) - try: - instance = self.compute_api.get(context, internal_id) - instances.append(instance) - except exception.NotFound: - raise exception.NotFound(_("Instance %s not found") - % ec2_id) + instance = self.compute_api.get(context, internal_id) + instances.append(instance) else: instances = self.compute_api.get_all(context, **kwargs) for instance in instances: @@ -722,7 +717,12 @@ class CloudController(object): r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] - r['groupSet'] = self._convert_to_set([], 'groups') + security_group_names = [] + if instance.get('security_groups'): + for security_group in instance['security_groups']: + security_group_names.append(security_group['name']) + r['groupSet'] = self._convert_to_set(security_group_names, + 'groupId') r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index c70bb39ed..056c7dd27 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -51,8 +51,8 @@ class FaultWrapper(wsgi.Middleware): try: return req.get_response(self.application) except Exception as ex: - LOG.exception(_("Caught error: %s"), str(ex)) - exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) + LOG.exception(_("Caught error: %s"), unicode(ex)) + exc = webob.exc.HTTPInternalServerError(explanation=unicode(ex)) return faults.Fault(exc) diff --git a/nova/compute/api.py b/nova/compute/api.py index 5d6a42a6b..c7b788225 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -318,7 +318,7 @@ class API(base.Base): def get(self, context, instance_id): """Get a single instance with the given ID.""" - rv = self.db.instance_get_by_id(context, instance_id) + rv = self.db.instance_get(context, instance_id) return dict(rv.iteritems()) def get_all(self, context, project_id=None, reservation_id=None, diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 01abee584..93b23a6f4 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -95,6 +95,7 @@ def get_by_type(instance_type): """retrieve instance type name""" if instance_type is None: return FLAGS.default_instance_type + try: ctxt = 
context.get_admin_context() inst_type = db.instance_type_get_by_name(ctxt, instance_type) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0f9bf301f..f4418af26 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -37,7 +37,6 @@ terminating it. import datetime import random import string -import logging import socket import functools @@ -231,22 +230,25 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_("Terminating instance %s"), instance_id, context=context) - if not FLAGS.stub_network: - address = self.db.instance_get_floating_address(context, - instance_ref['id']) - if address: - LOG.debug(_("Disassociating address %s"), address, + fixed_ip = instance_ref.get('fixed_ip') + if not FLAGS.stub_network and fixed_ip: + floating_ips = fixed_ip.get('floating_ips') or [] + for floating_ip in floating_ips: + address = floating_ip['address'] + LOG.debug("Disassociating address %s", address, context=context) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. + network_topic = self.db.queue_get_for(context, + FLAGS.network_topic, + floating_ip['host']) rpc.cast(context, - self.get_network_topic(context), + network_topic, {"method": "disassociate_floating_ip", "args": {"floating_address": address}}) - address = self.db.instance_get_fixed_address(context, - instance_ref['id']) + address = fixed_ip['address'] if address: LOG.debug(_("Deallocating address %s"), address, context=context) @@ -256,7 +258,7 @@ class ComputeManager(manager.Manager): self.network_manager.deallocate_fixed_ip(context.elevated(), address) - volumes = instance_ref.get('volumes', []) or [] + volumes = instance_ref.get('volumes') or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) if instance_ref['state'] == power_state.SHUTOFF: diff --git a/nova/db/api.py b/nova/db/api.py index 69f577764..69698ec2a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -379,11 +379,6 @@ def instance_get_project_vpn(context, project_id): return IMPL.instance_get_project_vpn(context, project_id) -def instance_get_by_id(context, instance_id): - """Get an instance by id.""" - return IMPL.instance_get_by_id(context, instance_id) - - def instance_is_vpn(context, instance_id): """True if instance is a vpn.""" return IMPL.instance_is_vpn(context, instance_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f8b0559d2..d669f5ef8 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,6 +19,7 @@ Implementation of SQLAlchemy backend. 
""" +import datetime import warnings from nova import db @@ -578,7 +579,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): 'AND instance_id IS NOT NULL ' 'AND allocated = 0', {'host': host, - 'time': time.isoformat()}) + 'time': time}) return result.rowcount @@ -670,8 +671,14 @@ def instance_data_get_for_project(context, project_id): def instance_destroy(context, instance_id): session = get_session() with session.begin(): - instance_ref = instance_get(context, instance_id, session=session) - instance_ref.delete(session=session) + session.execute('update instances set deleted=1,' + 'deleted_at=:at where id=:id', + {'id': instance_id, + 'at': datetime.datetime.utcnow()}) + session.execute('update security_group_instance_association ' + 'set deleted=1,deleted_at=:at where instance_id=:id', + {'id': instance_id, + 'at': datetime.datetime.utcnow()}) @require_context @@ -685,6 +692,7 @@ def instance_get(context, instance_id, session=None): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -698,7 +706,9 @@ def instance_get(context, instance_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound(_('No instance for id %s') % instance_id) + raise exception.InstanceNotFound(_('Instance %s not found') + % instance_id, + instance_id) return result @@ -782,33 +792,6 @@ def instance_get_project_vpn(context, project_id): @require_context -def instance_get_by_id(context, instance_id): - session = get_session() - - if is_admin_context(context): - result = session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ - filter_by(id=instance_id).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - elif is_user_context(context): - result = session.query(models.Instance).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload_all('fixed_ip.network')).\ - filter_by(project_id=context.project_id).\ - filter_by(id=instance_id).\ - filter_by(deleted=False).\ - first() - if not result: - raise exception.NotFound(_('Instance %s not found') % (instance_id)) - - return result - - -@require_context def instance_get_fixed_address(context, instance_id): session = get_session() with session.begin(): @@ -1419,7 +1402,8 @@ def volume_get(context, volume_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound(_('No volume for id %s') % volume_id) + raise exception.VolumeNotFound(_('Volume %s not found') % volume_id, + volume_id) return result @@ -1464,7 +1448,8 @@ def volume_get_instance(context, volume_id): options(joinedload('instance')).\ first() if not result: - raise exception.NotFound(_('Volume %s not found') % ec2_id) + raise exception.VolumeNotFound(_('Volume %s not found') % volume_id, + volume_id) return result.instance @@ -1605,6 +1590,11 @@ def security_group_destroy(context, security_group_id): # TODO(vish): do we have to use sql here? 
session.execute('update security_groups set deleted=1 where id=:id', {'id': security_group_id}) + session.execute('update security_group_instance_association ' + 'set deleted=1,deleted_at=:at ' + 'where security_group_id=:id', + {'id': security_group_id, + 'at': datetime.datetime.utcnow()}) session.execute('update security_group_rules set deleted=1 ' 'where group_id=:id', {'id': security_group_id}) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py index a312a7190..366944591 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -134,6 +134,9 @@ instances = Table('instances', meta, Column('ramdisk_id', String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), + Column('server_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), Column('launch_index', Integer()), Column('key_name', String(length=255, convert_unicode=False, assert_unicode=None, @@ -178,23 +181,6 @@ instances = Table('instances', meta, ) -iscsi_targets = Table('iscsi_targets', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('target_num', Integer()), - Column('host', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('volume_id', - Integer(), - ForeignKey('volumes.id'), - nullable=True), - ) - - key_pairs = Table('key_pairs', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), @@ -523,7 +509,7 @@ def upgrade(migrate_engine): meta.bind = migrate_engine for table in (auth_tokens, export_devices, fixed_ips, floating_ips, - instances, iscsi_targets, key_pairs, networks, + instances, key_pairs, networks, projects, quotas, security_groups, security_group_inst_assoc, security_group_rules, services, users, user_project_association, user_project_role_association, @@ -539,7 +525,7 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. 
for table in (auth_tokens, export_devices, fixed_ips, floating_ips, - instances, iscsi_targets, key_pairs, networks, + instances, key_pairs, networks, projects, quotas, security_groups, security_group_inst_assoc, security_group_rules, services, users, user_project_association, user_project_role_association, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py index bd3a3e6f8..699b837f8 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -41,6 +41,10 @@ networks = Table('networks', meta, Column('id', Integer(), primary_key=True, nullable=False), ) +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + # # New Tables @@ -131,6 +135,23 @@ instance_actions = Table('instance_actions', meta, ) +iscsi_targets = Table('iscsi_targets', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('target_num', Integer()), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + # # Tables to alter # @@ -188,7 +209,8 @@ def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata meta.bind = migrate_engine - for table in (certificates, consoles, console_pools, instance_actions): + for table in (certificates, consoles, console_pools, instance_actions, + iscsi_targets): try: table.create() except Exception: diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index 33d14827b..2a13c5466 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -46,12 +46,15 @@ def db_version(): meta.reflect(bind=engine) try: for table in ('auth_tokens', 'export_devices', 'fixed_ips', - 'floating_ips', 'instances', 'iscsi_targets', + 'floating_ips', 'instances', 'key_pairs', 'networks', 'projects', 'quotas', - 'security_group_rules', - 'security_group_instance_association', 'services', + 'security_group_instance_association', + 'security_group_rules', 'security_groups', + 'services', 'users', 'user_project_association', - 'user_project_role_association', 'volumes'): + 'user_project_role_association', + 'user_role_association', + 'volumes'): assert table in meta.tables return db_version_control(1) except AssertionError: diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 8ee3e3532..37b2fd7c7 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -325,10 +325,14 @@ class SecurityGroup(BASE, NovaBase): secondary="security_group_instance_association", primaryjoin='and_(' 'SecurityGroup.id == ' - 'SecurityGroupInstanceAssociation.security_group_id,' + 'SecurityGroupInstanceAssociation.security_group_id,' + 'SecurityGroupInstanceAssociation.deleted == False,' 'SecurityGroup.deleted == False)', secondaryjoin='and_(' 'SecurityGroupInstanceAssociation.instance_id == Instance.id,' + # (anthony) the condition below shouldn't be necessary now that the + # association is being marked as deleted. 
However, removing this + # may cause existing deployments to choke, so I'm leaving it 'Instance.deleted == False)', backref='security_groups') diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index dc885f138..4a9a28f43 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -20,6 +20,7 @@ Session Handling for SQLAlchemy backend """ from sqlalchemy import create_engine +from sqlalchemy import pool from sqlalchemy.orm import sessionmaker from nova import exception @@ -37,9 +38,14 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: + kwargs = {'pool_recycle': FLAGS.sql_idle_timeout, + 'echo': False} + + if FLAGS.sql_connection.startswith('sqlite'): + kwargs['poolclass'] = pool.NullPool + _ENGINE = create_engine(FLAGS.sql_connection, - pool_recycle=FLAGS.sql_idle_timeout, - echo=False) + **kwargs) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, expire_on_commit=expire_on_commit)) diff --git a/nova/exception.py b/nova/exception.py index f604fd63a..7d65bd6a5 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -46,7 +46,6 @@ class Error(Exception): class ApiError(Error): - def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code @@ -57,6 +56,18 @@ class NotFound(Error): pass +class InstanceNotFound(NotFound): + def __init__(self, message, instance_id): + self.instance_id = instance_id + super(InstanceNotFound, self).__init__(message) + + +class VolumeNotFound(NotFound): + def __init__(self, message, volume_id): + self.volume_id = volume_id + super(VolumeNotFound, self).__init__(message) + + class Duplicate(Error): pass diff --git a/nova/flags.py b/nova/flags.py index 43bc174d2..3ba3fe6fa 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -208,7 +208,7 @@ def _get_my_ip(): (addr, port) = csock.getsockname() csock.close() return addr - except socket.gaierror as ex: + except socket.error as ex: return "127.0.0.1" @@ -286,8 +286,8 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), DEFINE_string('sql_connection', 'sqlite:///$state_path/nova.sqlite', 'connection string for sql database') -DEFINE_string('sql_idle_timeout', - '3600', +DEFINE_integer('sql_idle_timeout', + 3600, 'timeout for idle sql database connections') DEFINE_integer('sql_max_retries', 12, 'sql connection attempts') DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval') diff --git a/nova/image/local.py b/nova/image/local.py index b44593221..f78b9aa89 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -18,6 +18,7 @@ import cPickle as pickle import os.path import random +import tempfile from nova import exception from nova.image import service @@ -26,15 +27,12 @@ from nova.image import service class LocalImageService(service.BaseImageService): """Image service storing images to local disk. + It assumes that image_ids are integers. - It assumes that image_ids are integers.""" + """ def __init__(self): - self._path = "/tmp/nova/images" - try: - os.makedirs(self._path) - except OSError: # Exists - pass + self._path = tempfile.mkdtemp() def _path_to(self, image_id): return os.path.join(self._path, str(image_id)) @@ -56,9 +54,7 @@ class LocalImageService(service.BaseImageService): raise exception.NotFound def create(self, context, data): - """ - Store the image data and return the new image id. 
- """ + """Store the image data and return the new image id.""" id = random.randint(0, 2 ** 31 - 1) data['id'] = id self.update(context, id, data) @@ -72,8 +68,9 @@ class LocalImageService(service.BaseImageService): raise exception.NotFound def delete(self, context, image_id): - """ - Delete the given image. Raises OSError if the image does not exist. + """Delete the given image. + Raises OSError if the image does not exist. + """ try: os.unlink(self._path_to(image_id)) @@ -81,8 +78,13 @@ class LocalImageService(service.BaseImageService): raise exception.NotFound def delete_all(self): - """ - Clears out all images in local directory - """ + """Clears out all images in local directory.""" for id in self._ids(): os.unlink(self._path_to(id)) + + def delete_imagedir(self): + """Deletes the local directory. + Raises OSError if directory is not empty. + + """ + os.rmdir(self._path) diff --git a/nova/image/s3.py b/nova/image/s3.py index c60b215a0..08a40f191 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -69,9 +69,7 @@ class S3ImageService(service.BaseImageService): """S3 has imageId but OpenStack wants id""" for image in images: if 'imageId' in image: - image_id = image['imageId'] - del image['imageId'] - image['id'] = image_id + image['id'] = image['imageId'] return images def index(self, context): diff --git a/nova/log.py b/nova/log.py index e1c9f46f4..b541488bd 100644 --- a/nova/log.py +++ b/nova/log.py @@ -31,6 +31,7 @@ import cStringIO import json import logging import logging.handlers +import sys import traceback from nova import flags @@ -191,6 +192,12 @@ class NovaLogger(logging.Logger): kwargs.pop('exc_info') self.error(message, **kwargs) + +def handle_exception(type, value, tb): + logging.root.critical(str(value), exc_info=(type, value, tb)) + + +sys.excepthook = handle_exception logging.setLoggerClass(NovaLogger) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index d29e17603..ed37e8ba7 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -20,6 +20,7 @@ Implements vlans, bridges, and iptables rules using linux utilities. 
import os from nova import db +from nova import exception from nova import flags from nova import log as logging from nova import utils @@ -37,6 +38,9 @@ FLAGS = flags.FLAGS flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') +flags.DEFINE_string('dhcp_domain', + 'novalocal', + 'domain to use for building the hostnames') flags.DEFINE_string('networks_path', '$state_path/networks', 'Location to keep network config files') @@ -152,6 +156,8 @@ def ensure_floating_forward(floating_ip, fixed_ip): """Ensure floating ip forwarding rule""" _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" % (floating_ip, fixed_ip)) + _confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s" + % (floating_ip, fixed_ip)) _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" % (fixed_ip, floating_ip)) @@ -160,6 +166,8 @@ def remove_floating_forward(floating_ip, fixed_ip): """Remove forwarding for floating ip""" _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" % (floating_ip, fixed_ip)) + _remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s" + % (floating_ip, fixed_ip)) _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" % (fixed_ip, floating_ip)) @@ -177,32 +185,77 @@ def ensure_vlan(vlan_num): LOG.debug(_("Starting VLAN inteface %s"), interface) _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) - _execute("sudo ifconfig %s up" % interface) + _execute("sudo ip link set %s up" % interface) return interface def ensure_bridge(bridge, interface, net_attrs=None): - """Create a bridge unless it already exists""" + """Create a bridge unless it already exists. + + :param interface: the interface to create the bridge on. + :param net_attrs: dictionary with attributes used to create the bridge. + + If net_attrs is set, it will add the net_attrs['gateway'] to the bridge + using net_attrs['broadcast'] and net_attrs['cidr']. It will also add + the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set. + + The code will attempt to move any ips that already exist on the interface + onto the bridge and reset the default gateway if necessary. 
+ """ if not _device_exists(bridge): LOG.debug(_("Starting Bridge interface for %s"), interface) _execute("sudo brctl addbr %s" % bridge) _execute("sudo brctl setfd %s 0" % bridge) # _execute("sudo brctl setageing %s 10" % bridge) _execute("sudo brctl stp %s off" % bridge) - if interface: - _execute("sudo brctl addif %s %s" % (bridge, interface)) + _execute("sudo ip link set %s up" % bridge) if net_attrs: - _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ - (bridge, - net_attrs['gateway'], - net_attrs['broadcast'], - net_attrs['netmask'])) + # NOTE(vish): The ip for dnsmasq has to be the first address on the + # bridge for it to respond to reqests properly + suffix = net_attrs['cidr'].rpartition('/')[2] + out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" % + (net_attrs['gateway'], + suffix, + net_attrs['broadcast'], + bridge), + check_exit_code=False) + if err and err != "RTNETLINK answers: File exists\n": + raise exception.Error("Failed to add ip: %s" % err) if(FLAGS.use_ipv6): - _execute("sudo ifconfig %s add %s up" % \ - (bridge, - net_attrs['cidr_v6'])) - else: - _execute("sudo ifconfig %s up" % bridge) + _execute("sudo ip -f inet6 addr change %s dev %s" % + (net_attrs['cidr_v6'], bridge)) + # NOTE(vish): If the public interface is the same as the + # bridge, then the bridge has to be in promiscuous + # to forward packets properly. + if(FLAGS.public_interface == bridge): + _execute("sudo ip link set dev %s promisc on" % bridge) + if interface: + # NOTE(vish): This will break if there is already an ip on the + # interface, so we move any ips to the bridge + gateway = None + out, err = _execute("sudo route -n") + for line in out.split("\n"): + fields = line.split() + if fields and fields[0] == "0.0.0.0" and fields[-1] == interface: + gateway = fields[1] + out, err = _execute("sudo ip addr show dev %s scope global" % + interface) + for line in out.split("\n"): + fields = line.split() + if fields and fields[0] == "inet": + params = ' '.join(fields[1:-1]) + _execute("sudo ip addr del %s dev %s" % (params, fields[-1])) + _execute("sudo ip addr add %s dev %s" % (params, bridge)) + if gateway: + _execute("sudo route add 0.0.0.0 gw %s" % gateway) + out, err = _execute("sudo brctl addif %s %s" % + (bridge, interface), + check_exit_code=False) + + if (err and err != "device %s is already a member of a bridge; can't " + "enslave it to bridge %s.\n" % (interface, bridge)): + raise exception.Error("Failed to add interface: %s" % err) + if FLAGS.use_nova_chains: (out, err) = _execute("sudo iptables -N nova_forward", check_exit_code=False) @@ -298,10 +351,9 @@ interface %s % pid, check_exit_code=False) if conffile in out: try: - _execute('sudo kill -HUP %d' % pid) - return + _execute('sudo kill %d' % pid) except Exception as exc: # pylint: disable-msg=W0703 - LOG.debug(_("Hupping radvd threw %s"), exc) + LOG.debug(_("killing radvd threw %s"), exc) else: LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) command = _ra_cmd(network_ref) @@ -314,8 +366,9 @@ interface %s def _host_dhcp(fixed_ip_ref): """Return a host string for an address""" instance_ref = fixed_ip_ref['instance'] - return "%s,%s.novalocal,%s" % (instance_ref['mac_address'], + return "%s,%s.%s,%s" % (instance_ref['mac_address'], instance_ref['hostname'], + FLAGS.dhcp_domain, fixed_ip_ref['address']) @@ -330,7 +383,8 @@ def _execute(cmd, *args, **kwargs): def _device_exists(device): """Check if ethernet device exists""" - (_out, err) = _execute("ifconfig %s" % device, check_exit_code=False) + (_out, err) = 
_execute("ip link show dev %s" % device, + check_exit_code=False) return not err @@ -360,6 +414,7 @@ def _dnsmasq_cmd(net): ' --strict-order', ' --bind-interfaces', ' --conf-file=', + ' --domain=%s' % FLAGS.dhcp_domain, ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), ' --listen-address=%s' % net['gateway'], ' --except-interface=lo', diff --git a/nova/network/manager.py b/nova/network/manager.py index fe99f2612..8eb9f041b 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -118,6 +118,10 @@ class NetworkManager(manager.Manager): super(NetworkManager, self).__init__(*args, **kwargs) def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. + """ + self.driver.init_host() # Set up networking for the projects for which we're already # the designated network host. ctxt = context.get_admin_context() @@ -395,7 +399,6 @@ class FlatDHCPManager(FlatManager): standalone service. """ super(FlatDHCPManager, self).init_host() - self.driver.init_host() self.driver.metadata_forward() def setup_compute_network(self, context, instance_id): @@ -428,6 +431,10 @@ class FlatDHCPManager(FlatManager): self.driver.ensure_bridge(network_ref['bridge'], FLAGS.flat_interface, network_ref) + if not FLAGS.fake_network: + self.driver.update_dhcp(context, network_id) + if(FLAGS.use_ipv6): + self.driver.update_ra(context, network_id) class VlanManager(NetworkManager): @@ -461,7 +468,6 @@ class VlanManager(NetworkManager): standalone service. """ super(VlanManager, self).init_host() - self.driver.init_host() self.driver.metadata_forward() def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): @@ -497,7 +503,7 @@ class VlanManager(NetworkManager): network_ref['bridge']) def create_networks(self, context, cidr, num_networks, network_size, - vlan_start, vpn_start, cidr_v6): + cidr_v6, vlan_start, vpn_start): """Create networks based on parameters.""" fixed_net = IPy.IP(cidr) fixed_net_v6 = IPy.IP(cidr_v6) diff --git a/nova/rpc.py b/nova/rpc.py index 01fc6d44b..2b1f7298b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -46,7 +46,7 @@ LOG = logging.getLogger('nova.rpc') class Connection(carrot_connection.BrokerConnection): """Connection instance object""" @classmethod - def instance(cls, new=False): + def instance(cls, new=True): """Returns the instance""" if new or not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, @@ -246,7 +246,7 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance(True) + conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -319,7 +319,7 @@ def call(context, topic, msg): self.result = data['result'] wait_msg = WaitMessage() - conn = Connection.instance(True) + conn = Connection.instance() consumer = DirectConsumer(connection=conn, msg_id=msg_id) consumer.register_callback(wait_msg) diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index baf4966d4..0191ceb3d 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -43,7 +43,9 @@ class SimpleScheduler(chance.ChanceScheduler): def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): """Picks a host that is up and has the fewest running instances.""" instance_ref = db.instance_get(context, instance_id) - if instance_ref['availability_zone'] and context.is_admin: + if 
(instance_ref['availability_zone'] + and ':' in instance_ref['availability_zone'] + and context.is_admin): zone, _x, host = instance_ref['availability_zone'].partition(':') service = db.service_get_by_args(context.elevated(), host, 'nova-compute') @@ -75,7 +77,9 @@ class SimpleScheduler(chance.ChanceScheduler): def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" volume_ref = db.volume_get(context, volume_id) - if (':' in volume_ref['availability_zone']) and context.is_admin: + if (volume_ref['availability_zone'] + and ':' in volume_ref['availability_zone'] + and context.is_admin): zone, _x, host = volume_ref['availability_zone'].partition(':') service = db.service_get_by_args(context.elevated(), host, 'nova-volume') diff --git a/nova/service.py b/nova/service.py index 2c30997f2..59648adf2 100644 --- a/nova/service.py +++ b/nova/service.py @@ -157,8 +157,9 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - logging.audit(_("Starting %s node (version %s)"), topic, - version.version_string_with_vcs()) + vcs_string = version.version_string_with_vcs() + logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)") + % locals()) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) diff --git a/nova/test.py b/nova/test.py index cd049f007..d498a573b 100644 --- a/nova/test.py +++ b/nova/test.py @@ -77,9 +77,10 @@ class TestCase(unittest.TestCase): network_manager.VlanManager().create_networks(ctxt, FLAGS.fixed_range, 5, 16, + FLAGS.fixed_range_v6, FLAGS.vlan_start, FLAGS.vpn_start, - FLAGS.fixed_range_v6) + ) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 5d9ddefbe..8ab4d7569 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -143,6 +143,7 @@ class LocalImageServiceTest(unittest.TestCase, def tearDown(self): self.service.delete_all() + self.service.delete_imagedir() self.stubs.UnsetAll() diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 29883e7c8..724f14f19 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -76,7 +76,7 @@ class ServersTest(unittest.TestCase): fakes.stub_out_key_pair_funcs(self.stubs) fakes.stub_out_image_service(self.stubs) self.stubs.Set(nova.db.api, 'instance_get_all', return_servers) - self.stubs.Set(nova.db.api, 'instance_get_by_id', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers) self.stubs.Set(nova.db.api, 'instance_add_security_group', diff --git a/nova/tests/db/nova.austin.sqlite b/nova/tests/db/nova.austin.sqlite Binary files differnew file mode 100644 index 000000000..ad1326bce --- /dev/null +++ b/nova/tests/db/nova.austin.sqlite diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 66a16b0cb..2569e262b 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -36,6 +36,7 @@ from nova.auth import manager class FakeHttplibSocket(object): """a fake socket implementation for httplib.HTTPResponse, trivial""" def __init__(self, response_string): + self.response_string = response_string self._buffer = StringIO.StringIO(response_string) def makefile(self, 
_mode, _other): @@ -66,13 +67,16 @@ class FakeHttplibConnection(object): # For some reason, the response doesn't have "HTTP/1.0 " prepended; I # guess that's a function the web server usually provides. resp = "HTTP/1.0 %s" % resp - sock = FakeHttplibSocket(resp) - self.http_response = httplib.HTTPResponse(sock) + self.sock = FakeHttplibSocket(resp) + self.http_response = httplib.HTTPResponse(self.sock) self.http_response.begin() def getresponse(self): return self.http_response + def getresponsebody(self): + return self.sock.response_string + def close(self): """Required for compatibility with boto/tornado""" pass @@ -104,7 +108,7 @@ class ApiEc2TestCase(test.TestCase): self.app = ec2.Authenticate(ec2.Requestify(ec2.Executor(), 'nova.api.ec2.cloud.CloudController')) - def expect_http(self, host=None, is_secure=False): + def expect_http(self, host=None, is_secure=False, api_version=None): """Returns a new EC2 connection""" self.ec2 = boto.connect_ec2( aws_access_key_id='fake', @@ -113,13 +117,31 @@ class ApiEc2TestCase(test.TestCase): region=regioninfo.RegionInfo(None, 'test', self.host), port=8773, path='/services/Cloud') + if api_version: + self.ec2.APIVersion = api_version self.mox.StubOutWithMock(self.ec2, 'new_http_connection') - http = FakeHttplibConnection( + self.http = FakeHttplibConnection( self.app, '%s:8773' % (self.host), False) # pylint: disable-msg=E1103 - self.ec2.new_http_connection(host, is_secure).AndReturn(http) - return http + self.ec2.new_http_connection(host, is_secure).AndReturn(self.http) + return self.http + + def test_xmlns_version_matches_request_version(self): + self.expect_http(api_version='2010-10-30') + self.mox.ReplayAll() + + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + + # Any request should be fine + self.ec2.get_all_instances() + self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(), + 'The version in the xmlns of the response does ' + 'not match the API version given in the request.') + + self.manager.delete_project(project) + self.manager.delete_user(user) def test_describe_instances(self): """Test that, after creating a user and a project, the describe diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 09f6ee94a..2aa0690e7 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -49,7 +49,7 @@ class ComputeTestCase(test.TestCase): self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() + self.context = context.RequestContext('fake', 'fake', False) def tearDown(self): self.manager.delete_user(self.user) @@ -69,6 +69,13 @@ class ComputeTestCase(test.TestCase): inst['ami_launch_index'] = 0 return db.instance_create(self.context, inst)['id'] + def _create_group(self): + values = {'name': 'testgroup', + 'description': 'testgroup', + 'user_id': self.user.id, + 'project_id': self.project.id} + return db.security_group_create(self.context, values) + def test_create_instance_defaults_display_name(self): """Verify that an instance cannot be created without a display_name.""" cases = [dict(), dict(display_name=None)] @@ -82,21 +89,53 @@ class ComputeTestCase(test.TestCase): def test_create_instance_associates_security_groups(self): """Make sure create associates security groups""" - values = {'name': 'default', - 'description': 'default', - 'user_id': self.user.id, - 'project_id': 
self.project.id} - group = db.security_group_create(self.context, values) + group = self._create_group() ref = self.compute_api.create( self.context, instance_type=FLAGS.default_instance_type, image_id=None, - security_group=['default']) + security_group=['testgroup']) try: self.assertEqual(len(db.security_group_get_by_instance( - self.context, ref[0]['id'])), 1) + self.context, ref[0]['id'])), 1) + group = db.security_group_get(self.context, group['id']) + self.assert_(len(group.instances) == 1) + finally: + db.security_group_destroy(self.context, group['id']) + db.instance_destroy(self.context, ref[0]['id']) + + def test_destroy_instance_disassociates_security_groups(self): + """Make sure destroying disassociates security groups""" + group = self._create_group() + + ref = self.compute_api.create( + self.context, + instance_type=FLAGS.default_instance_type, + image_id=None, + security_group=['testgroup']) + try: + db.instance_destroy(self.context, ref[0]['id']) + group = db.security_group_get(self.context, group['id']) + self.assert_(len(group.instances) == 0) finally: db.security_group_destroy(self.context, group['id']) + + def test_destroy_security_group_disassociates_instances(self): + """Make sure destroying security groups disassociates instances""" + group = self._create_group() + + ref = self.compute_api.create( + self.context, + instance_type=FLAGS.default_instance_type, + image_id=None, + security_group=['testgroup']) + + try: + db.security_group_destroy(self.context, group['id']) + group = db.security_group_get(context.get_admin_context( + read_deleted=True), group['id']) + self.assert_(len(group.instances) == 0) + finally: db.instance_destroy(self.context, ref[0]['id']) def test_run_terminate(self): diff --git a/nova/tests/test_localization.py b/nova/tests/test_localization.py new file mode 100644 index 000000000..6992773f5 --- /dev/null +++ b/nova/tests/test_localization.py @@ -0,0 +1,100 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import glob +import logging +import os +import re +import sys +import unittest + +import nova + + +class LocalizationTestCase(unittest.TestCase): + def test_multiple_positional_format_placeholders(self): + pat = re.compile("\W_\(") + single_pat = re.compile("\W%\W") + root_path = os.path.dirname(nova.__file__) + problems = {} + for root, dirs, files in os.walk(root_path): + for fname in files: + if not fname.endswith(".py"): + continue + pth = os.path.join(root, fname) + txt = fulltext = file(pth).read() + txt_lines = fulltext.splitlines() + if not pat.search(txt): + continue + problems[pth] = [] + pos = txt.find("_(") + while pos > -1: + # Make sure that this isn't part of a dunder; + # e.g., __init__(... + # or something like 'self.assert_(...' 
+ test_txt = txt[pos - 1: pos + 10] + if not (pat.search(test_txt)): + txt = txt[pos + 2:] + pos = txt.find("_(") + continue + pos += 2 + txt = txt[pos:] + innerChars = [] + # Count pairs of open/close parens until _() closing + # paren is found. + parenCount = 1 + pos = 0 + while parenCount > 0: + char = txt[pos] + if char == "(": + parenCount += 1 + elif char == ")": + parenCount -= 1 + innerChars.append(char) + pos += 1 + inner_all = "".join(innerChars) + # Filter out '%%' and '%(' + inner = inner_all.replace("%%", "").replace("%(", "") + # Filter out the single '%' operators + inner = single_pat.sub("", inner) + # Within the remaining content, count % + fmtCount = inner.count("%") + if fmtCount > 1: + inner_first = inner_all.splitlines()[0] + lns = ["%s" % (p + 1) + for p, t in enumerate(txt_lines) + if inner_first in t] + lnums = ", ".join(lns) + # Using ugly string concatenation to avoid having + # this test fail itself. + inner_all = "_" + "(" + "%s" % inner_all + problems[pth].append("Line: %s Text: %s" % + (lnums, inner_all)) + # Look for more + pos = txt.find("_(") + if not problems[pth]: + del problems[pth] + if problems: + out = ["Problem(s) found in localized string formatting", + "(see http://www.gnu.org/software/hello/manual/" + "gettext/Python.html for more information)", + "", + " ------------ Files to fix ------------"] + for pth in problems: + out.append(" %s:" % pth) + for val in set(problems[pth]): + out.append(" %s" % val) + raise AssertionError("\n".join(out)) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 0b9b847a0..6e5a0114b 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -221,7 +221,12 @@ class IptablesFirewallTestCase(test.TestCase): self.project = self.manager.create_project('fake', 'fake', 'fake') self.context = context.RequestContext('fake', 'fake') self.network = utils.import_object(FLAGS.network_manager) - self.fw = libvirt_conn.IptablesFirewallDriver() + + class FakeLibvirtConnection(object): + pass + self.fake_libvirt_connection = FakeLibvirtConnection() + self.fw = libvirt_conn.IptablesFirewallDriver( + get_connection=lambda: self.fake_libvirt_connection) def tearDown(self): self.manager.delete_project(self.project) @@ -474,6 +479,19 @@ class NWFilterTestCase(test.TestCase): 'project_id': 'fake'}) inst_id = instance_ref['id'] + ip = '10.11.12.13' + + network_ref = db.project_get_network(self.context, + 'fake') + + fixed_ip = {'address': ip, + 'network_id': network_ref['id']} + + admin_ctxt = context.get_admin_context() + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + def _ensure_all_called(): instance_filter = 'nova-instance-%s' % instance_ref['name'] secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] diff --git a/nova/utils.py b/nova/utils.py index 2f3bd2894..8d7ff1f64 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -152,6 +152,42 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): return result +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug(_("Running cmd (SSH): %s"), cmd) + if addl_env: + raise exception.Error("Environment not supported over SSH") + + if process_input: + # This is (probably) fixable if we need it... 
+ raise exception.Error("process_input not supported over SSH") + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + #stdin.write('process_input would go here') + #stdin.flush() + + # NOTE(justinsb): This seems suspicious... + # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_("Result was %s") % exit_status) + if check_exit_code and exit_status != 0: + raise exception.ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + + return (stdout, stderr) + + def abspath(s): return os.path.join(os.path.dirname(__file__), s) @@ -206,21 +242,17 @@ def last_octet(address): def get_my_linklocal(interface): try: if_str = execute("ip -f inet6 -o addr show %s" % interface) - condition = "\s+inet6\s+([0-9a-f:]+/\d+)\s+scope\s+link" + condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link" links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] if address[0] is not None: return address[0] else: - return 'fe00::' - except IndexError as ex: - LOG.warn(_("Couldn't get Link Local IP of %(interface)s :%(ex)s") - % locals()) - except ProcessExecutionError as ex: - LOG.warn(_("Couldn't get Link Local IP of %(interface)s :%(ex)s") - % locals()) - except: - return 'fe00::' + raise exception.Error(_("Link Local address is not found.:%s") + % if_str) + except Exception as ex: + raise exception.Error(_("Couldn't get Link Local IP of %(interface)s" + " :%(ex)s") % locals()) def to_global_ipv6(prefix, mac): diff --git a/nova/version.py b/nova/version.py index 7b27acb6a..c3ecc2245 100644 --- a/nova/version.py +++ b/nova/version.py @@ -21,7 +21,7 @@ except ImportError: 'revision_id': 'LOCALREVISION', 'revno': 0} -NOVA_VERSION = ['2011', '1'] +NOVA_VERSION = ['2011', '2'] YEAR, COUNT = NOVA_VERSION FINAL = False # This becomes true at Release Candidate time diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 5afa3221d..29d18dac5 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -191,7 +191,7 @@ class HyperVConnection(object): vcpus = long(instance['vcpus']) procsetting.VirtualQuantity = vcpus procsetting.Reservation = vcpus - procsetting.Limit = vcpus + procsetting.Limit = 100000 # static assignment to 100% (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( vm.path_(), [procsetting.GetText_(1)]) diff --git a/nova/virt/images.py b/nova/virt/images.py index 9c987e14d..7a6fef330 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -111,5 +111,8 @@ def _image_path(path): def image_url(image): + if FLAGS.image_service == "nova.image.glance.GlanceImageService": + return "http://%s:%s/images/%s" % (FLAGS.glance_host, + FLAGS.glance_port, image) return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, image) diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index 8139c3620..88bfbc668 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -75,11 +75,13 @@ <!-- <model type='virtio'/> CANT RUN virtio network right now --> <filterref filter="nova-instance-${name}"> <parameter name="IP" value="${ip_address}" /> - <parameter name="DHCPSERVER" value="${dhcp_server}" /> - <parameter name="RASERVER" value="${ra_server}" /> + <parameter 
name="DHCPSERVER" value="${dhcp_server}" /> #if $getVar('extra_params', False) ${extra_params} #end if +#if $getVar('ra_server', False) + <parameter name="RASERVER" value="${ra_server}" /> +#end if </filterref> </interface> diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 35b78368e..112ccd43a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -150,13 +150,8 @@ class LibvirtConnection(object): self._wrapped_conn = None self.read_only = read_only - self.nwfilter = NWFilterFirewall(self._get_connection) - - if not FLAGS.firewall_driver: - self.firewall_driver = self.nwfilter - self.nwfilter.handle_security_groups = True - else: - self.firewall_driver = utils.import_object(FLAGS.firewall_driver) + fw_class = utils.import_class(FLAGS.firewall_driver) + self.firewall_driver = fw_class(get_connection=self._get_connection) def init_host(self, host): # Adopt existing VM's running here @@ -410,7 +405,7 @@ class LibvirtConnection(object): instance['id'], power_state.NOSTATE, 'launching') - self.nwfilter.setup_basic_filtering(instance) + self.firewall_driver.setup_basic_filtering(instance) self.firewall_driver.prepare_instance_filter(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) @@ -680,8 +675,7 @@ class LibvirtConnection(object): # Assume that the gateway also acts as the dhcp server. dhcp_server = network['gateway'] ra_server = network['ra_server'] - if not ra_server: - ra_server = 'fd00::' + if FLAGS.allow_project_net_traffic: if FLAGS.use_ipv6: net, mask = _get_net_and_mask(network['cidr']) @@ -720,11 +714,13 @@ class LibvirtConnection(object): 'mac_address': instance['mac_address'], 'ip_address': ip_address, 'dhcp_server': dhcp_server, - 'ra_server': ra_server, 'extra_params': extra_params, 'rescue': rescue, 'local': instance_type['local_gb'], 'driver_type': driver_type} + + if ra_server: + xml_info['ra_server'] = ra_server + "/128" if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" @@ -907,6 +903,20 @@ class FirewallDriver(object): the security group.""" raise NotImplementedError() + def setup_basic_filtering(self, instance): + """Create rules to block spoofing and allow dhcp. + + This gets called when spawning an instance, before + :method:`prepare_instance_filter`. + + """ + raise NotImplementedError() + + def _ra_server_for_instance(self, instance): + network = db.network_get_by_instance(context.get_admin_context(), + instance['id']) + return network['ra_server'] + class NWFilterFirewall(FirewallDriver): """ @@ -954,11 +964,15 @@ class NWFilterFirewall(FirewallDriver): """ - def __init__(self, get_connection): + def __init__(self, get_connection, **kwargs): self._libvirt_get_connection = get_connection self.static_filters_configured = False self.handle_security_groups = False + def apply_instance_filter(self, instance): + """No-op. Everything is done in prepare_instance_filter""" + pass + def _get_connection(self): return self._libvirt_get_connection() _conn = property(_get_connection) @@ -1117,7 +1131,9 @@ class NWFilterFirewall(FirewallDriver): 'nova-base-ipv6', 'nova-allow-dhcp-server'] if FLAGS.use_ipv6: - instance_secgroup_filter_children += ['nova-allow-ra-server'] + ra_server = self._ra_server_for_instance(instance) + if ra_server: + instance_secgroup_filter_children += ['nova-allow-ra-server'] ctxt = context.get_admin_context() @@ -1144,10 +1160,6 @@ class NWFilterFirewall(FirewallDriver): return - def apply_instance_filter(self, instance): - """No-op. 
Everything is done in prepare_instance_filter""" - pass - def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -1195,9 +1207,14 @@ class NWFilterFirewall(FirewallDriver): class IptablesFirewallDriver(FirewallDriver): - def __init__(self, execute=None): + def __init__(self, execute=None, **kwargs): self.execute = execute or utils.execute self.instances = {} + self.nwfilter = NWFilterFirewall(kwargs['get_connection']) + + def setup_basic_filtering(self, instance): + """Use NWFilter from libvirt for this.""" + return self.nwfilter.setup_basic_filtering(instance) def apply_instance_filter(self, instance): """No-op. Everything is done in prepare_instance_filter""" @@ -1303,8 +1320,9 @@ class IptablesFirewallDriver(FirewallDriver): elif(ip_version == 6): # Allow RA responses ra_server = self._ra_server_for_instance(instance) - our_rules += ['-A %s -s %s -p icmpv6 ' - '-j ACCEPT' % (chain_name, ra_server)] + if ra_server: + our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' % + (chain_name, ra_server + "/128")] #Allow project network traffic if (FLAGS.allow_project_net_traffic): cidrv6 = self._project_cidrv6_for_instance(instance) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a9308eea1..6813c4da1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -641,7 +641,7 @@ def with_vdi_attached_here(session, vdi, read_only, f): session.get_xenapi().VBD.plug(vbd) LOG.debug(_('Plugging VBD %s done.'), vbd) orig_dev = session.get_xenapi().VBD.get_device(vbd) - LOG.debug(_('VBD %s plugged as %s'), vbd, orig_dev) + LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals()) dev = remap_vbd_dev(orig_dev) if dev != orig_dev: LOG.debug(_('VBD %(vbd)s plugged into wrong dev, ' diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 628a171fa..e84ce20c4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -149,7 +149,7 @@ class VMOps(object): if isinstance(instance_or_vm, (int, long)): ctx = context.get_admin_context() try: - instance_obj = db.instance_get_by_id(ctx, instance_or_vm) + instance_obj = db.instance_get(ctx, instance_or_vm) instance_name = instance_obj.name except exception.NotFound: # The unit tests screw this up, as they use an integer for diff --git a/nova/volume/api.py b/nova/volume/api.py index 0bcd8a3b0..478c83486 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -45,7 +45,7 @@ class API(base.Base): LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" " %(size)sG volume") % locals()) raise quota.QuotaError(_("Volume quota exceeded. 
You cannot " - "create a volume of size %s") % size) + "create a volume of size %sG") % size) options = { 'size': size, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index da7307733..82f4c2f54 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -294,8 +294,10 @@ class ISCSIDriver(VolumeDriver): self._execute("sudo ietadm --op delete --tid=%s" % iscsi_target) - def _get_name_and_portal(self, volume_name, host): + def _get_name_and_portal(self, volume): """Gets iscsi name and portal from volume name and host.""" + volume_name = volume['name'] + host = volume['host'] (out, _err) = self._execute("sudo iscsiadm -m discovery -t " "sendtargets -p %s" % host) for target in out.splitlines(): @@ -307,8 +309,7 @@ class ISCSIDriver(VolumeDriver): def discover_volume(self, volume): """Discover volume on a remote host.""" - iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'], - volume['host']) + iscsi_name, iscsi_portal = self._get_name_and_portal(volume) self._execute("sudo iscsiadm -m node -T %s -p %s --login" % (iscsi_name, iscsi_portal)) self._execute("sudo iscsiadm -m node -T %s -p %s --op update " @@ -319,8 +320,7 @@ class ISCSIDriver(VolumeDriver): def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" - iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'], - volume['host']) + iscsi_name, iscsi_portal = self._get_name_and_portal(volume) self._execute("sudo iscsiadm -m node -T %s -p %s --op update " "-n node.startup -v manual" % (iscsi_name, iscsi_portal)) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 6f8e25e19..6e70ec881 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -87,7 +87,7 @@ class VolumeManager(manager.Manager): if volume['status'] in ['available', 'in-use']: self.driver.ensure_export(ctxt, volume) else: - LOG.info(_("volume %s: skipping export"), volume_ref['name']) + LOG.info(_("volume %s: skipping export"), volume['name']) def create_volume(self, context, volume_id): """Creates and exports the volume.""" diff --git a/nova/volume/san.py b/nova/volume/san.py new file mode 100644 index 000000000..26d6125e7 --- /dev/null +++ b/nova/volume/san.py @@ -0,0 +1,335 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Drivers for san-stored volumes. +The unique thing about a SAN is that we don't expect that we can run the volume + controller on the SAN hardware. We expect to access it over SSH or some API. 
+""" + +import os +import paramiko + +from nova import exception +from nova import flags +from nova import log as logging +from nova.utils import ssh_execute +from nova.volume.driver import ISCSIDriver + +LOG = logging.getLogger("nova.volume.driver") +FLAGS = flags.FLAGS +flags.DEFINE_boolean('san_thin_provision', 'true', + 'Use thin provisioning for SAN volumes?') +flags.DEFINE_string('san_ip', '', + 'IP address of SAN controller') +flags.DEFINE_string('san_login', 'admin', + 'Username for SAN controller') +flags.DEFINE_string('san_password', '', + 'Password for SAN controller') +flags.DEFINE_string('san_privatekey', '', + 'Filename of private key to use for SSH authentication') + + +class SanISCSIDriver(ISCSIDriver): + """ Base class for SAN-style storage volumes + (storage providers we access over SSH)""" + #Override because SAN ip != host ip + def _get_name_and_portal(self, volume): + """Gets iscsi name and portal from volume name and host.""" + volume_name = volume['name'] + + # TODO(justinsb): store in volume, remerge with generic iSCSI code + host = FLAGS.san_ip + + (out, _err) = self._execute("sudo iscsiadm -m discovery -t " + "sendtargets -p %s" % host) + + location = None + find_iscsi_name = self._build_iscsi_target_name(volume) + for target in out.splitlines(): + if find_iscsi_name in target: + (location, _sep, iscsi_name) = target.partition(" ") + break + if not location: + raise exception.Error(_("Could not find iSCSI export " + " for volume %s") % + volume_name) + + iscsi_portal = location.split(",")[0] + LOG.debug("iscsi_name=%s, iscsi_portal=%s" % + (iscsi_name, iscsi_portal)) + return (iscsi_name, iscsi_portal) + + def _build_iscsi_target_name(self, volume): + return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) + + # discover_volume is still OK + # undiscover_volume is still OK + + def _connect_to_ssh(self): + ssh = paramiko.SSHClient() + #TODO(justinsb): We need a better SSH key policy + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if FLAGS.san_password: + ssh.connect(FLAGS.san_ip, + username=FLAGS.san_login, + password=FLAGS.san_password) + elif FLAGS.san_privatekey: + privatekeyfile = os.path.expanduser(FLAGS.san_privatekey) + # It sucks that paramiko doesn't support DSA keys + privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) + ssh.connect(FLAGS.san_ip, + username=FLAGS.san_login, + pkey=privatekey) + else: + raise exception.Error("Specify san_password or san_privatekey") + return ssh + + def _run_ssh(self, command, check_exit_code=True): + #TODO(justinsb): SSH connection caching (?) 
+
+    def _run_ssh(self, command, check_exit_code=True):
+        #TODO(justinsb): SSH connection caching (?)
+        ssh = self._connect_to_ssh()
+
+        #TODO(justinsb): Reintroduce the retry hack
+        ret = ssh_execute(ssh, command, check_exit_code=check_exit_code)
+
+        ssh.close()
+
+        return ret
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        pass
+
+    def create_export(self, context, volume):
+        """Exports the volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+        pass
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met"""
+        if not (FLAGS.san_password or FLAGS.san_privatekey):
+            raise exception.Error("Specify san_password or san_privatekey")
+
+        if not (FLAGS.san_ip):
+            raise exception.Error("san_ip must be set")
+
+
+def _collect_lines(data):
+    """ Split lines from data into an array, trimming them """
+    matches = []
+    for line in data.splitlines():
+        match = line.strip()
+        matches.append(match)
+
+    return matches
+
+
+def _get_prefixed_values(data, prefix):
+    """Collect lines which start with prefix; with trimming"""
+    matches = []
+    for line in data.splitlines():
+        line = line.strip()
+        if line.startswith(prefix):
+            match = line[len(prefix):]
+            match = match.strip()
+            matches.append(match)
+
+    return matches
+
+
+class SolarisISCSIDriver(SanISCSIDriver):
+    """Executes commands relating to Solaris-hosted ISCSI volumes.
+    Basic setup for a Solaris iSCSI server:
+    pkg install storage-server SUNWiscsit
+    svcadm enable stmf
+    svcadm enable -r svc:/network/iscsi/target:default
+    pfexec itadm create-tpg e1000g0 ${MYIP}
+    pfexec itadm create-target -t e1000g0
+
+    Then grant the user that will be logging on lots of permissions.
+    I'm not sure exactly which though:
+    zfs allow justinsb create,mount,destroy rpool
+    usermod -P'File System Management' justinsb
+    usermod -P'Primary Administrator' justinsb
+
+    Also make sure you can login using san_login & san_password/san_privatekey
+    """
+
+    def _view_exists(self, luid):
+        cmd = "pfexec /usr/sbin/stmfadm list-view -l %s" % (luid)
+        (out, _err) = self._run_ssh(cmd,
+                                    check_exit_code=False)
+        if "no views found" in out:
+            return False
+
+        if "View Entry:" in out:
+            return True
+
+        raise exception.Error("Cannot parse list-view output: %s" % (out))
+
+    def _get_target_groups(self):
+        """Gets list of target groups from host."""
+        (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg")
+        matches = _get_prefixed_values(out, 'Target group: ')
+        LOG.debug("target_groups=%s" % matches)
+        return matches
+
+    def _target_group_exists(self, target_group_name):
+        return target_group_name not in self._get_target_groups()
+
+    def _get_target_group_members(self, target_group_name):
+        (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg -v %s" %
+                                    (target_group_name))
+        matches = _get_prefixed_values(out, 'Member: ')
+        LOG.debug("members of %s=%s" % (target_group_name, matches))
+        return matches
+
+    def _is_target_group_member(self, target_group_name, iscsi_target_name):
+        return iscsi_target_name in (
+            self._get_target_group_members(target_group_name))
+
+    def _get_iscsi_targets(self):
+        cmd = ("pfexec /usr/sbin/itadm list-target | "
+               "awk '{print $1}' | grep -v ^TARGET")
+        (out, _err) = self._run_ssh(cmd)
+        matches = _collect_lines(out)
+        LOG.debug("_get_iscsi_targets=%s" % (matches))
+        return matches
+
+    def _iscsi_target_exists(self, iscsi_target_name):
+        return iscsi_target_name in self._get_iscsi_targets()
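The two module-level helpers above exist to scrape stmfadm/itadm output fetched over SSH. A quick illustration of _get_prefixed_values() on canned "stmfadm list-tg" output (the group names are invented):

    # Canned output in the "Target group: <name>" shape the driver expects;
    # the names themselves are made up.
    sample = """Target group: tg-volume-00000001
    Target group: tg-volume-00000002
    """

    print(_get_prefixed_values(sample, 'Target group: '))
    # ['tg-volume-00000001', 'tg-volume-00000002']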
+
+    def _build_zfs_poolname(self, volume):
+        #TODO(justinsb): rpool should be configurable
+        zfs_poolname = 'rpool/%s' % (volume['name'])
+        return zfs_poolname
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        if int(volume['size']) == 0:
+            sizestr = '100M'
+        else:
+            sizestr = '%sG' % volume['size']
+
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        thin_provision_arg = '-s' if FLAGS.san_thin_provision else ''
+        # Create a zfs volume
+        self._run_ssh("pfexec /usr/sbin/zfs create %s -V %s %s" %
+                      (thin_provision_arg,
+                       sizestr,
+                       zfs_poolname))
+
+    def _get_luid(self, volume):
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        cmd = ("pfexec /usr/sbin/sbdadm list-lu | "
+               "grep -w %s | awk '{print $1}'" %
+               (zfs_poolname))
+
+        (stdout, _stderr) = self._run_ssh(cmd)
+
+        luid = stdout.strip()
+        return luid
+
+    def _is_lu_created(self, volume):
+        luid = self._get_luid(volume)
+        return luid
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        zfs_poolname = self._build_zfs_poolname(volume)
+        self._run_ssh("pfexec /usr/sbin/zfs destroy %s" %
+                      (zfs_poolname))
+
+    def local_path(self, volume):
+        # TODO(justinsb): Is this needed here?
+        escaped_group = FLAGS.volume_group.replace('-', '--')
+        escaped_name = volume['name'].replace('-', '--')
+        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        #TODO(justinsb): On bootup, this is called for every volume.
+        # It then runs ~5 SSH commands for each volume,
+        # most of which fetch the same info each time
+        # This makes initial start stupid-slow
+        self._do_export(volume, force_create=False)
+
+    def create_export(self, context, volume):
+        self._do_export(volume, force_create=True)
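create_volume() above only builds a single zfs command and ships it over SSH. For an invented 10 GB volume with san_thin_provision enabled, the string it would send is reproduced below:

    # Reproducing the command construction for an invented volume dict.
    volume = {'name': 'volume-00000001', 'size': 10}
    sizestr = '%sG' % volume['size']            # '10G'
    zfs_poolname = 'rpool/%s' % volume['name']  # 'rpool/volume-00000001'
    thin_provision_arg = '-s'                   # san_thin_provision enabled

    print("pfexec /usr/sbin/zfs create %s -V %s %s" %
          (thin_provision_arg, sizestr, zfs_poolname))
    # pfexec /usr/sbin/zfs create -s -V 10G rpool/volume-00000001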
+
+    def _do_export(self, volume, force_create):
+        # Create a Logical Unit (LU) backed by the zfs volume
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        if force_create or not self._is_lu_created(volume):
+            cmd = ("pfexec /usr/sbin/sbdadm create-lu /dev/zvol/rdsk/%s" %
+                   (zfs_poolname))
+            self._run_ssh(cmd)
+
+        luid = self._get_luid(volume)
+        iscsi_name = self._build_iscsi_target_name(volume)
+        target_group_name = 'tg-%s' % volume['name']
+
+        # Create a iSCSI target, mapped to just this volume
+        if force_create or not self._target_group_exists(target_group_name):
+            self._run_ssh("pfexec /usr/sbin/stmfadm create-tg %s" %
+                          (target_group_name))
+
+        # Yes, we add the initiatior before we create it!
+        # Otherwise, it complains that the target is already active
+        if force_create or not self._is_target_group_member(target_group_name,
+                                                            iscsi_name):
+            self._run_ssh("pfexec /usr/sbin/stmfadm add-tg-member -g %s %s" %
+                          (target_group_name, iscsi_name))
+        if force_create or not self._iscsi_target_exists(iscsi_name):
+            self._run_ssh("pfexec /usr/sbin/itadm create-target -n %s" %
+                          (iscsi_name))
+        if force_create or not self._view_exists(luid):
+            self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
+                          (target_group_name, luid))
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+
+        # This is the reverse of _do_export
+        luid = self._get_luid(volume)
+        iscsi_name = self._build_iscsi_target_name(volume)
+        target_group_name = 'tg-%s' % volume['name']
+
+        if self._view_exists(luid):
+            self._run_ssh("pfexec /usr/sbin/stmfadm remove-view -l %s -a" %
+                          (luid))
+
+        if self._iscsi_target_exists(iscsi_name):
+            self._run_ssh("pfexec /usr/sbin/stmfadm offline-target %s" %
+                          (iscsi_name))
+            self._run_ssh("pfexec /usr/sbin/itadm delete-target %s" %
+                          (iscsi_name))
+
+        # We don't delete the tg-member; we delete the whole tg!
+
+        if self._target_group_exists(target_group_name):
+            self._run_ssh("pfexec /usr/sbin/stmfadm delete-tg %s" %
+                          (target_group_name))
+
+        if self._is_lu_created(volume):
+            self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
+                          (luid))
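Seen end to end, _do_export() drives four idempotent steps (logical unit, target group, target, view), and remove_export() undoes them in roughly reverse order. A compressed sketch of the command sequence for an invented volume (the IQN prefix assumes the default iscsi_target_prefix, and the LU id is a placeholder that really comes from sbdadm list-lu):

    # Commands _do_export() would issue for an invented volume, in order;
    # each step is skipped when the object already exists and force_create is False.
    name = 'volume-00000001'
    iqn = 'iqn.2010-10.org.openstack:%s' % name   # assumes default iscsi_target_prefix
    luid = '600144f0PLACEHOLDER'                  # placeholder LU id

    for cmd in [
        "pfexec /usr/sbin/sbdadm create-lu /dev/zvol/rdsk/rpool/%s" % name,
        "pfexec /usr/sbin/stmfadm create-tg tg-%s" % name,
        "pfexec /usr/sbin/stmfadm add-tg-member -g tg-%s %s" % (name, iqn),
        "pfexec /usr/sbin/itadm create-target -n %s" % iqn,
        "pfexec /usr/sbin/stmfadm add-view -t tg-%s %s" % (name, luid),
    ]:
        print(cmd)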
