| | | |
|---|---|---|
| author | Tushar Patil <tushar.vitthal.patil@gmail.com> | 2011-08-15 17:30:13 -0700 |
| committer | Tushar Patil <tushar.vitthal.patil@gmail.com> | 2011-08-15 17:30:13 -0700 |
| commit | 2be419cd88d23ae5c0b6e5bd56fff5791e4685a7 | |
| tree | beac43baed8eeea7efe91694866041620ef5a42a /nova | |
| parent | 24df37853288d2a1fb8e51c23a78816da4d0d4b4 | |
| parent | ea53d0f37a4f478ffbe18516f99ca26192117e80 | |
Merged trunk
Diffstat (limited to 'nova')
37 files changed, 1496 insertions, 678 deletions
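The first hunk below adds one more typed `except` clause to the EC2 `Executor`. As a minimal, self-contained sketch of the pattern that hunk extends (the toy exception classes here stand in for the `nova.exception` types named in the diff):

```python
# Each anticipated client error is caught by name and turned into a typed
# EC2 error response; only genuinely unexpected exceptions reach the
# catch-all. Stand-in classes: the real ones live in nova.exception.
class InvalidParameterValue(Exception):
    pass


class InvalidPortRange(Exception):
    pass


def execute(request):
    try:
        return request()
    except (InvalidParameterValue, InvalidPortRange) as ex:
        return ('error', type(ex).__name__, str(ex))  # typed client error
    except Exception as ex:
        return ('error', 'UnknownError', str(ex))     # generic fallback


def bad_request():
    raise InvalidPortRange('port range 100-1 is invalid')

print(execute(bad_request))
# -> ('error', 'InvalidPortRange', 'port range 100-1 is invalid')
```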
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 8b6e47cfb..96df97393 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -358,6 +358,10 @@ class Executor(wsgi.Application): LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) + except exception.InvalidPortRange as ex: + LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex), + context=context) + return self._error(req, context, type(ex).__name__, unicode(ex)) except Exception as ex: extra = {'environment': req.environ} LOG.exception(_('Unexpected error raised: %s'), unicode(ex), diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index c07bfdf09..44b35c385 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -43,8 +43,8 @@ def _translate_floating_ip_view(floating_ip): def _translate_floating_ips_view(floating_ips): - return {'floating_ips': [_translate_floating_ip_view(floating_ip) - for floating_ip in floating_ips]} + return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip'] + for ip in floating_ips]} class FloatingIPController(object): @@ -131,7 +131,7 @@ class FloatingIPController(object): "floating_ip": floating_ip, "fixed_ip": fixed_ip}} - def disassociate(self, req, id): + def disassociate(self, req, id, body=None): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 8ff01d9ea..c1abd2eb6 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -128,6 +128,7 @@ class CreateInstanceHelper(object): raise exc.HTTPBadRequest(explanation=msg) zone_blob = server_dict.get('blob') + availability_zone = server_dict.get('availability_zone') name = server_dict['name'] self._validate_server_name(name) name = name.strip() @@ -168,7 +169,8 @@ class CreateInstanceHelper(object): reservation_id=reservation_id, min_count=min_count, max_count=max_count, - requested_networks=requested_networks)) + requested_networks=requested_networks, + availability_zone=availability_zone)) except quota.QuotaError as error: self._handle_quota_error(error) except exception.ImageNotFound as error: diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 77a304941..5c34e28fd 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -163,7 +163,7 @@ class Controller(object): @scheduler_api.redirect_handler def update(self, req, id, body): - """ Updates the server name or password """ + """Update server name then pass on to version-specific controller""" if len(req.body) == 0: raise exc.HTTPUnprocessableEntity() @@ -178,17 +178,15 @@ class Controller(object): self.helper._validate_server_name(name) update_dict['display_name'] = name.strip() - self._parse_update(ctxt, id, body, update_dict) - try: self.compute_api.update(ctxt, id, **update_dict) except exception.NotFound: raise exc.HTTPNotFound() - return exc.HTTPNoContent() + return self._update(ctxt, req, id, body) - def _parse_update(self, context, id, inst_dict, update_dict): - pass + def _update(self, context, req, id, inst_dict): + return exc.HTTPNotImplemented() @scheduler_api.redirect_handler def action(self, req, id, body): @@ -210,11 +208,15 @@ class 
Controller(object): } self.actions.update(admin_actions) - for key in self.actions.keys(): - if key in body: + for key in body: + if key in self.actions: return self.actions[key](body, req, id) + else: + msg = _("There is no such server action: %s") % (key,) + raise exc.HTTPBadRequest(explanation=msg) - raise exc.HTTPNotImplemented() + msg = _("Invalid request body") + raise exc.HTTPBadRequest(explanation=msg) def _action_create_backup(self, input_dict, req, instance_id): """Backup a server instance. @@ -568,10 +570,11 @@ class ControllerV10(Controller): def _limit_items(self, items, req): return common.limited(items, req) - def _parse_update(self, context, server_id, inst_dict, update_dict): + def _update(self, context, req, id, inst_dict): if 'adminPass' in inst_dict['server']: - self.compute_api.set_admin_password(context, server_id, + self.compute_api.set_admin_password(context, id, inst_dict['server']['adminPass']) + return exc.HTTPNoContent() def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ @@ -693,6 +696,10 @@ class ControllerV11(Controller): LOG.info(msg) raise exc.HTTPBadRequest(explanation=msg) + def _update(self, context, req, id, inst_dict): + instance = self.compute_api.routing_get(context, id) + return self._build_view(req, instance, is_detail=True) + def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ try: diff --git a/nova/compute/api.py b/nova/compute/api.py index 6a2c98a6a..e0d43c626 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -404,10 +404,6 @@ class API(base.Base): updates['hostname'] = self.hostname_factory(instance) instance = self.update(context, instance_id, **updates) - - for group_id in security_groups: - self.trigger_security_group_members_refresh(elevated, group_id) - return instance def _ask_scheduler_to_create_instance(self, context, base_options, @@ -579,18 +575,20 @@ class API(base.Base): {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) - def trigger_security_group_members_refresh(self, context, group_id): + def trigger_security_group_members_refresh(self, context, group_ids): """Called when a security group gains a new or loses a member. Sends an update request to each compute node for whom this is relevant. """ - # First, we get the security group rules that reference this group as + # First, we get the security group rules that reference these groups as # the grantee.. - security_group_rules = \ + security_group_rules = set() + for group_id in group_ids: + security_group_rules.update( self.db.security_group_rule_get_by_security_group_grantee( context, - group_id) + group_id)) # ..then we distill the security groups to which they belong.. 
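```python
# Editor's annotation (not part of the commit): the compute/api.py hunk above
# reworks trigger_security_group_members_refresh() to accept a list of group
# ids and union the grantee rules into one set, so an instance that belongs
# to several groups triggers a single de-duplicated refresh. The shape of
# that collection step, with a fake db layer for illustration:
def rules_for_groups(db, context, group_ids):
    rules = set()
    for group_id in group_ids:
        # one lookup per group; set.update() drops rules already collected
        rules.update(db.security_group_rule_get_by_security_group_grantee(
            context, group_id))
    return rules


class FakeDB(object):
    def security_group_rule_get_by_security_group_grantee(self, ctxt, gid):
        return [('allow-ssh', gid), ('allow-icmp', 0)]  # 'allow-icmp' repeats

print(sorted(rules_for_groups(FakeDB(), None, [1, 2])))
# [('allow-icmp', 0), ('allow-ssh', 1), ('allow-ssh', 2)]
```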
security_groups = set() diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c8a328325..d9ca31f60 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -170,7 +170,9 @@ class ComputeManager(manager.SchedulerDependentManager): elif drv_state == power_state.RUNNING: # Hyper-V and VMWareAPI drivers will raise and exception try: - self.driver.ensure_filtering_rules_for_instance(instance) + net_info = self._get_instance_nw_info(context, instance) + self.driver.ensure_filtering_rules_for_instance(instance, + net_info) except NotImplementedError: LOG.warning(_('Hypervisor driver does not ' 'support firewall rules')) @@ -1226,6 +1228,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def check_shared_storage_test_file(self, context, filename): """Confirms existence of the tmpfile under FLAGS.instances_path. + Cannot confirm tmpfile return False. :param context: security context :param filename: confirm existence of FLAGS.instances_path/thisfile @@ -1233,7 +1236,9 @@ class ComputeManager(manager.SchedulerDependentManager): """ tmp_file = os.path.join(FLAGS.instances_path, filename) if not os.path.exists(tmp_file): - raise exception.FileNotFound(file_path=tmp_file) + return False + else: + return True @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def cleanup_shared_storage_test_file(self, context, filename): @@ -1256,11 +1261,13 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.update_available_resource(context, self.host) - def pre_live_migration(self, context, instance_id, time=None): + def pre_live_migration(self, context, instance_id, time=None, + block_migration=False, disk=None): """Preparations for live migration at dest host. :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param block_migration: if true, prepare for block migration """ if not time: @@ -1310,19 +1317,26 @@ class ComputeManager(manager.SchedulerDependentManager): # This nwfilter is necessary on the destination host. # In addition, this method is creating filtering rule # onto destination host. - self.driver.ensure_filtering_rules_for_instance(instance_ref) + self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info) + + # Preparation for block migration + if block_migration: + self.driver.pre_block_migration(context, + instance_ref, + disk) - def live_migration(self, context, instance_id, dest): + def live_migration(self, context, instance_id, + dest, block_migration=False): """Executing live migration. :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param dest: destination host + :param block_migration: if true, do block migration """ # Get instance for error handling. instance_ref = self.db.instance_get(context, instance_id) - i_name = instance_ref.name try: # Checking volume node is working correctly when any volumes @@ -1333,16 +1347,25 @@ class ComputeManager(manager.SchedulerDependentManager): {"method": "check_for_export", "args": {'instance_id': instance_id}}) - # Asking dest host to preparing live migration. 
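```python
# Editor's annotation (not part of the commit): the hunk continuing below
# makes the source host describe its local disks only when block migration
# was requested, and ship that description to the destination in the same
# pre_live_migration RPC. Sketched with the names used in the diff:
def ask_dest_to_prepare(driver, rpc_call, context, instance_ref, dest_topic,
                        block_migration=False):
    if block_migration:
        # local block devices must be recreated on the destination first
        disk = driver.get_instance_disk_info(context, instance_ref)
    else:
        disk = None  # shared storage: nothing to pre-create
    rpc_call(context, dest_topic,
             {'method': 'pre_live_migration',
              'args': {'instance_id': instance_ref['id'],
                       'block_migration': block_migration,
                       'disk': disk}})
```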
+ if block_migration: + disk = self.driver.get_instance_disk_info(context, + instance_ref) + else: + disk = None + rpc.call(context, self.db.queue_get_for(context, FLAGS.compute_topic, dest), {"method": "pre_live_migration", - "args": {'instance_id': instance_id}}) + "args": {'instance_id': instance_id, + 'block_migration': block_migration, + 'disk': disk}}) except Exception: + i_name = instance_ref.name msg = _("Pre live migration for %(i_name)s failed at %(dest)s") LOG.error(msg % locals()) - self.recover_live_migration(context, instance_ref) + self.rollback_live_migration(context, instance_ref, + dest, block_migration) raise # Executing live migration @@ -1350,9 +1373,11 @@ class ComputeManager(manager.SchedulerDependentManager): # nothing must be recovered in this version. self.driver.live_migration(context, instance_ref, dest, self.post_live_migration, - self.recover_live_migration) + self.rollback_live_migration, + block_migration) - def post_live_migration(self, ctxt, instance_ref, dest): + def post_live_migration(self, ctxt, instance_ref, + dest, block_migration=False): """Post operations for live migration. This method is called from live_migration @@ -1361,6 +1386,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param ctxt: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param dest: destination host + :param block_migration: if true, do block migration """ @@ -1403,8 +1429,29 @@ class ComputeManager(manager.SchedulerDependentManager): "%(i_name)s cannot inherit floating " "ip.\n%(e)s") % (locals())) - # Restore instance/volume state - self.recover_live_migration(ctxt, instance_ref, dest) + # Define domain at destination host, without doing it, + # pause/suspend/terminate do not work. + rpc.call(ctxt, + self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + {"method": "post_live_migration_at_destination", + "args": {'instance_id': instance_ref.id, + 'block_migration': block_migration}}) + + # Restore instance state + self.db.instance_update(ctxt, + instance_ref['id'], + {'state_description': 'running', + 'state': power_state.RUNNING, + 'host': dest}) + # Restore volume state + for volume_ref in instance_ref['volumes']: + volume_id = volume_ref['id'] + self.db.volume_update(ctxt, volume_id, {'status': 'in-use'}) + + # No instance booting at source host, but instance dir + # must be deleted for preparing next block migration + if block_migration: + self.driver.destroy(instance_ref, network_info) LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.') % locals()) @@ -1412,31 +1459,64 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): - """Recovers Instance/volume state from migrating -> running. + def post_live_migration_at_destination(self, context, + instance_id, block_migration=False): + """Post operations for live migration . - :param ctxt: security context + :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id - :param host: DB column value is updated by this hostname. - If none, the host instance currently running is selected. 
+ :param block_migration: block_migration """ - if not host: - host = instance_ref['host'] + instance_ref = self.db.instance_get(context, instance_id) + LOG.info(_('Post operation of migraton started for %s .') + % instance_ref.name) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.post_live_migration_at_destination(context, + instance_ref, + network_info, + block_migration) - self.db.instance_update(ctxt, + def rollback_live_migration(self, context, instance_ref, + dest, block_migration): + """Recovers Instance/volume state from migrating -> running. + + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param dest: + This method is called from live migration src host. + This param specifies destination host. + """ + host = instance_ref['host'] + self.db.instance_update(context, instance_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, 'host': host}) - if dest: - volume_api = volume.API() for volume_ref in instance_ref['volumes']: volume_id = volume_ref['id'] - self.db.volume_update(ctxt, volume_id, {'status': 'in-use'}) - if dest: - volume_api.remove_from_compute(ctxt, volume_id, dest) + self.db.volume_update(context, volume_id, {'status': 'in-use'}) + volume.API().remove_from_compute(context, volume_id, dest) + + # Block migration needs empty image at destination host + # before migration starts, so if any failure occurs, + # any empty images has to be deleted. + if block_migration: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, dest), + {"method": "rollback_live_migration_at_destination", + "args": {'instance_id': instance_ref['id']}}) + + def rollback_live_migration_at_destination(self, context, instance_id): + """ Cleaning up image directory that is created pre_live_migration. 
+ + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + """ + instances_ref = self.db.instance_get(context, instance_id) + network_info = self._get_instance_nw_info(context, instances_ref) + self.driver.destroy(instances_ref, network_info) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" diff --git a/nova/db/api.py b/nova/db/api.py index 9e1d2e86d..b6b98daf4 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -585,27 +585,6 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - """Get instances.vcpus by host and project.""" - return IMPL.instance_get_vcpu_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - """Get amount of memory by host and project.""" - return IMPL.instance_get_memory_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - """Get total amount of disk by host and project.""" - return IMPL.instance_get_disk_sum_by_host_and_project(context, - hostname, - proj_id) - - def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bbbe94e7a..dde371820 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1537,45 +1537,6 @@ def instance_add_security_group(context, instance_id, security_group_id): @require_context -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.vcpus)) - if not result: - return 0 - return result - - -@require_context -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.memory_mb)) - if not result: - return 0 - return result - - -@require_context -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.local_gb)) - if not result: - return 0 - return result - - -@require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" action_ref = models.InstanceActions() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 939fde199..f2a4680b0 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -127,14 +127,14 @@ class ComputeNode(BASE, NovaBase): 'ComputeNode.service_id == Service.id,' 'ComputeNode.deleted == False)') - vcpus = Column(Integer, nullable=True) - memory_mb = Column(Integer, nullable=True) - local_gb = Column(Integer, nullable=True) - vcpus_used = Column(Integer, nullable=True) - memory_mb_used = Column(Integer, nullable=True) - local_gb_used = Column(Integer, nullable=True) - hypervisor_type = Column(Text, nullable=True) - hypervisor_version = 
Column(Integer, nullable=True) + vcpus = Column(Integer) + memory_mb = Column(Integer) + local_gb = Column(Integer) + vcpus_used = Column(Integer) + memory_mb_used = Column(Integer) + local_gb_used = Column(Integer) + hypervisor_type = Column(Text) + hypervisor_version = Column(Integer) # Note(masumotok): Expected Strings example: # @@ -479,6 +479,11 @@ class SecurityGroupIngressRule(BASE, NovaBase): # Note: This is not the parent SecurityGroup. It's SecurityGroup we're # granting access for. group_id = Column(Integer, ForeignKey('security_groups.id')) + grantee_group = relationship("SecurityGroup", + foreign_keys=group_id, + primaryjoin='and_(' + 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' + 'SecurityGroupIngressRule.deleted == False)') class ProviderFirewallRule(BASE, NovaBase): diff --git a/nova/exception.py b/nova/exception.py index b4bd4ff9e..0c07934cf 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -277,6 +277,11 @@ class DestinationHypervisorTooOld(Invalid): "has been provided.") +class DestinationDiskExists(Invalid): + message = _("The supplied disk path (%(path)s) already exists, " + "it is expected not to exist.") + + class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") @@ -725,6 +730,10 @@ class InstanceExists(Duplicate): message = _("Instance %(name)s already exists.") +class InvalidSharedStorage(NovaException): + message = _("%(path)s is on shared storage: %(reason)s") + + class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" diff --git a/nova/flags.py b/nova/flags.py index e994a1665..48d5e8168 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -305,6 +305,7 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') +DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], 'list of APIs to enable by default') DEFINE_string('ec2_host', '$my_ip', 'ip of api server') diff --git a/nova/network/manager.py b/nova/network/manager.py index 8c5281b73..f8a21fe4b 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -61,6 +61,7 @@ from nova import quota from nova import utils from nova import rpc from nova.network import api as network_api +from nova.compute import api as compute_api import random @@ -324,6 +325,7 @@ class NetworkManager(manager.SchedulerDependentManager): network_driver = FLAGS.network_driver self.driver = utils.import_object(network_driver) self.network_api = network_api.API() + self.compute_api = compute_api.API() super(NetworkManager, self).__init__(service_name='network', *args, **kwargs) @@ -379,6 +381,15 @@ class NetworkManager(manager.SchedulerDependentManager): self.host) return host + def _do_trigger_security_group_members_refresh_for_instance(self, + instance_id): + admin_context = context.get_admin_context() + instance_ref = self.db.instance_get(admin_context, instance_id) + groups = instance_ref['security_groups'] + group_ids = [group['id'] for group in groups] + self.compute_api.trigger_security_group_members_refresh(admin_context, + group_ids) + def _get_networks_for_instance(self, context, instance_id, project_id, requested_networks=None): """Determine & return which networks an instance should connect to.""" @@ -578,6 +589,8 @@ class 
NetworkManager(manager.SchedulerDependentManager): network['id'], instance_id, kwargs.get('address', None)) + self._do_trigger_security_group_members_refresh_for_instance( + instance_id) get_vif = self.db.virtual_interface_get_by_instance_and_network vif = get_vif(context, instance_id, network['id']) values = {'allocated': True, @@ -592,6 +605,11 @@ class NetworkManager(manager.SchedulerDependentManager): self.db.fixed_ip_update(context, address, {'allocated': False, 'virtual_interface_id': None}) + fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) + instance_ref = fixed_ip_ref['instance'] + instance_id = instance_ref['id'] + self._do_trigger_security_group_members_refresh_for_instance( + instance_id) def lease_fixed_ip(self, context, address): """Called by dhcp-bridge when ip is leased.""" @@ -633,6 +651,64 @@ class NetworkManager(manager.SchedulerDependentManager): network_ref = self.db.fixed_ip_get_network(context, address) self._setup_network(context, network_ref) + def _validate_cidrs(self, context, cidr, num_networks, network_size): + significant_bits = 32 - int(math.log(network_size, 2)) + req_net = netaddr.IPNetwork(cidr) + req_net_ip = str(req_net.ip) + req_size = network_size * num_networks + if req_size > req_net.size: + msg = _("network_size * num_networks exceeds cidr size") + raise ValueError(msg) + adjusted_cidr_str = req_net_ip + '/' + str(significant_bits) + adjusted_cidr = netaddr.IPNetwork(adjusted_cidr_str) + try: + used_nets = self.db.network_get_all(context) + except exception.NoNetworksFound: + used_nets = [] + used_cidrs = [netaddr.IPNetwork(net['cidr']) for net in used_nets] + if adjusted_cidr in used_cidrs: + raise ValueError(_("cidr already in use")) + for adjusted_cidr_supernet in adjusted_cidr.supernet(): + if adjusted_cidr_supernet in used_cidrs: + msg = _("requested cidr (%s) conflicts with existing supernet") + raise ValueError(msg % str(adjusted_cidr)) + # watch for smaller subnets conflicting + used_supernets = [] + for used_cidr in used_cidrs: + if not used_cidr: + continue + if used_cidr.size < network_size: + for ucsupernet in used_cidr.supernet(): + if ucsupernet.size == network_size: + used_supernets.append(ucsupernet) + all_req_nets = [] + if num_networks == 1: + if adjusted_cidr in used_supernets: + msg = _("requested cidr (%s) conflicts with existing smaller" + " cidr") + raise ValueError(msg % str(adjusted_cidr)) + else: + all_req_nets.append(adjusted_cidr) + elif num_networks >= 2: + # split supernet into subnets + next_cidr = adjusted_cidr + for index in range(num_networks): + if next_cidr.first > req_net.last: + msg = _("Not enough subnets avail to satisfy requested " + "num_net works - some subnets in requested range" + " already in use") + raise ValueError(msg) + while True: + used_values = used_cidrs + used_supernets + if next_cidr in used_values: + next_cidr = next_cidr.next() + else: + all_req_nets.append(next_cidr) + next_cidr = next_cidr.next() + break + all_req_nets = sorted(list(set(all_req_nets))) + return all_req_nets + def create_networks(self, context, label, cidr, multi_host, num_networks, network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, **kwargs): @@ -643,8 +719,8 @@ class NetworkManager(manager.SchedulerDependentManager): network_size_v6 = 1 << 64 if cidr: - fixed_net = netaddr.IPNetwork(cidr) - significant_bits = 32 - int(math.log(network_size, 2)) + req_cidrs = self._validate_cidrs(context, cidr, num_networks, + network_size) for index in range(num_networks): net = {} @@ -654,9 
+730,7 @@ class NetworkManager(manager.SchedulerDependentManager): net['dns2'] = dns2 if cidr: - start = index * network_size - project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start], - significant_bits)) + project_net = req_cidrs[index] net['cidr'] = str(project_net) net['multi_host'] = multi_host net['netmask'] = str(project_net.netmask) @@ -905,7 +979,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): network['id'], instance_id, kwargs.get('address', None)) - + self._do_trigger_security_group_members_refresh_for_instance( + instance_id) vif = self.db.virtual_interface_get_by_instance_and_network(context, instance_id, network['id']) diff --git a/nova/rpc/amqp.py b/nova/rpc/amqp.py index 61555795a..fe429b266 100644 --- a/nova/rpc/amqp.py +++ b/nova/rpc/amqp.py @@ -257,7 +257,7 @@ class TopicAdapterConsumer(AdapterConsumer): self.queue = topic self.routing_key = topic self.exchange = FLAGS.control_exchange - self.durable = False + self.durable = FLAGS.rabbit_durable_queues super(TopicAdapterConsumer, self).__init__(connection=connection, topic=topic, proxy=proxy) @@ -345,7 +345,7 @@ class TopicPublisher(Publisher): def __init__(self, connection=None, topic='broadcast'): self.routing_key = topic self.exchange = FLAGS.control_exchange - self.durable = False + self.durable = FLAGS.rabbit_durable_queues super(TopicPublisher, self).__init__(connection=connection) @@ -373,6 +373,7 @@ class DirectConsumer(Consumer): self.queue = msg_id self.routing_key = msg_id self.exchange = msg_id + self.durable = False self.auto_delete = True self.exclusive = True super(DirectConsumer, self).__init__(connection=connection) @@ -386,6 +387,7 @@ class DirectPublisher(Publisher): def __init__(self, connection=None, msg_id=None): self.routing_key = msg_id self.exchange = msg_id + self.durable = False self.auto_delete = True super(DirectPublisher, self).__init__(connection=connection) @@ -573,7 +575,7 @@ def send_message(topic, message, wait=True): publisher = messaging.Publisher(connection=Connection.instance(), exchange=FLAGS.control_exchange, - durable=False, + durable=FLAGS.rabbit_durable_queues, exchange_type='topic', routing_key=topic) publisher.send(message) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 1bfa7740a..f28353f05 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -30,6 +30,7 @@ from nova import log as logging from nova import rpc from nova import utils from nova.compute import power_state +from nova.api.ec2 import ec2utils FLAGS = flags.FLAGS @@ -78,7 +79,8 @@ class Scheduler(object): """Must override at least this method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) - def schedule_live_migration(self, context, instance_id, dest): + def schedule_live_migration(self, context, instance_id, dest, + block_migration=False): """Live migration scheduling method. :param context: @@ -87,9 +89,7 @@ class Scheduler(object): :return: The host where instance is running currently. Then scheduler send request that host. - """ - # Whether instance exists and is running. instance_ref = db.instance_get(context, instance_id) @@ -97,10 +97,11 @@ class Scheduler(object): self._live_migration_src_check(context, instance_ref) # Checking destination host. - self._live_migration_dest_check(context, instance_ref, dest) - + self._live_migration_dest_check(context, instance_ref, + dest, block_migration) # Common checking. 
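```python
# Editor's annotation (not part of the commit): _live_migration_common_check()
# in the hunks below inverts the shared-storage requirement depending on the
# mode: plain live migration needs the instance path shared between hosts,
# block migration needs it NOT shared. The decision table, with stand-in
# exceptions mirroring nova.exception.InvalidSharedStorage / FileNotFound:
class InvalidSharedStorage(Exception):
    pass


class FileNotFound(Exception):
    pass


def check_storage(on_shared_storage, block_migration):
    if on_shared_storage and block_migration:
        raise InvalidSharedStorage(
            'Block migration can not be used with shared storage.')
    if not on_shared_storage and not block_migration:
        raise FileNotFound('instances_path is not shared between the hosts')
    # the remaining two combinations are valid for their respective modes
```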
- self._live_migration_common_check(context, instance_ref, dest) + self._live_migration_common_check(context, instance_ref, + dest, block_migration) # Changing instance_state. db.instance_set_state(context, @@ -130,7 +131,8 @@ class Scheduler(object): # Checking instance is running. if (power_state.RUNNING != instance_ref['state'] or \ 'running' != instance_ref['state_description']): - raise exception.InstanceNotRunning(instance_id=instance_ref['id']) + instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) + raise exception.InstanceNotRunning(instance_id=instance_id) # Checing volume node is running when any volumes are mounted # to the instance. @@ -147,7 +149,8 @@ class Scheduler(object): if not self.service_is_up(services[0]): raise exception.ComputeServiceUnavailable(host=src) - def _live_migration_dest_check(self, context, instance_ref, dest): + def _live_migration_dest_check(self, context, instance_ref, dest, + block_migration): """Live migration check routine (for destination host). :param context: security context @@ -168,16 +171,18 @@ class Scheduler(object): # and dest is not same. src = instance_ref['host'] if dest == src: - raise exception.UnableToMigrateToSelf( - instance_id=instance_ref['id'], - host=dest) + instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) + raise exception.UnableToMigrateToSelf(instance_id=instance_id, + host=dest) # Checking dst host still has enough capacities. self.assert_compute_node_has_enough_resources(context, instance_ref, - dest) + dest, + block_migration) - def _live_migration_common_check(self, context, instance_ref, dest): + def _live_migration_common_check(self, context, instance_ref, dest, + block_migration): """Live migration common check routine. Below checkings are followed by @@ -186,11 +191,26 @@ class Scheduler(object): :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host + :param block_migration if True, check for block_migration. """ # Checking shared storage connectivity - self.mounted_on_same_shared_storage(context, instance_ref, dest) + # if block migration, instances_paths should not be on shared storage. + try: + self.mounted_on_same_shared_storage(context, instance_ref, dest) + if block_migration: + reason = _("Block migration can not be used " + "with shared storage.") + raise exception.InvalidSharedStorage(reason=reason, path=dest) + except exception.FileNotFound: + if not block_migration: + src = instance_ref['host'] + ipath = FLAGS.instances_path + logging.error(_("Cannot confirm tmpfile at %(ipath)s is on " + "same shared storage between %(src)s " + "and %(dest)s.") % locals()) + raise # Checking dest exists. dservice_refs = db.service_get_all_compute_by_host(context, dest) @@ -229,14 +249,26 @@ class Scheduler(object): "original host %(src)s.") % locals()) raise - def assert_compute_node_has_enough_resources(self, context, - instance_ref, dest): + def assert_compute_node_has_enough_resources(self, context, instance_ref, + dest, block_migration): + """Checks if destination host has enough resource for live migration. - Currently, only memory checking has been done. - If storage migration(block migration, meaning live-migration - without any shared storage) will be available, local storage - checking is also necessary. 
+ :param context: security context + :param instance_ref: nova.db.sqlalchemy.models.Instance object + :param dest: destination host + :param block_migration: if True, disk checking has been done + + """ + self.assert_compute_node_has_enough_memory(context, instance_ref, dest) + if not block_migration: + return + self.assert_compute_node_has_enough_disk(context, instance_ref, dest) + + def assert_compute_node_has_enough_memory(self, context, + instance_ref, dest): + """Checks if destination host has enough memory for live migration. + :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object @@ -244,23 +276,70 @@ class Scheduler(object): """ - # Getting instance information - hostname = instance_ref['hostname'] + # Getting total available memory and disk of host + avail = self._get_compute_info(context, dest, 'memory_mb') - # Getting host information - service_refs = db.service_get_all_compute_by_host(context, dest) - compute_node_ref = service_refs[0]['compute_node'][0] + # Getting total used memory and disk of host + # It should be sum of memories that are assigned as max value, + # because overcommiting is risky. + used = 0 + instance_refs = db.instance_get_all_by_host(context, dest) + used_list = [i['memory_mb'] for i in instance_refs] + if used_list: + used = reduce(lambda x, y: x + y, used_list) - mem_total = int(compute_node_ref['memory_mb']) - mem_used = int(compute_node_ref['memory_mb_used']) - mem_avail = mem_total - mem_used mem_inst = instance_ref['memory_mb'] - if mem_avail <= mem_inst: - reason = _("Unable to migrate %(hostname)s to destination: " - "%(dest)s (host:%(mem_avail)s <= instance:" - "%(mem_inst)s)") + avail = avail - used + if avail <= mem_inst: + instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) + reason = _("Unable to migrate %(instance_id)s to %(dest)s: " + "Lack of disk(host:%(avail)s <= instance:%(mem_inst)s)") + raise exception.MigrationError(reason=reason % locals()) + + def assert_compute_node_has_enough_disk(self, context, + instance_ref, dest): + """Checks if destination host has enough disk for block migration. + + :param context: security context + :param instance_ref: nova.db.sqlalchemy.models.Instance object + :param dest: destination host + + """ + + # Getting total available memory and disk of host + avail = self._get_compute_info(context, dest, 'local_gb') + + # Getting total used memory and disk of host + # It should be sum of disks that are assigned as max value + # because overcommiting is risky. 
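```python
# Editor's annotation (not part of the commit): the memory check above and
# the disk check continuing below share one shape: take the node total from
# compute_nodes, subtract the sum of every resident instance's promised
# maximum (max values, since overcommitting is risky), and refuse the
# migration if the remainder does not exceed the incoming instance's need.
def has_enough(total, assigned, needed):
    avail = total - sum(assigned)
    return avail > needed  # the real checks raise MigrationError on <=

print(has_enough(total=80, assigned=[20, 30], needed=30))  # False: 30 <= 30
```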
+ used = 0 + instance_refs = db.instance_get_all_by_host(context, dest) + used_list = [i['local_gb'] for i in instance_refs] + if used_list: + used = reduce(lambda x, y: x + y, used_list) + + disk_inst = instance_ref['local_gb'] + avail = avail - used + if avail <= disk_inst: + instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) + reason = _("Unable to migrate %(instance_id)s to %(dest)s: " + "Lack of disk(host:%(avail)s " + "<= instance:%(disk_inst)s)") raise exception.MigrationError(reason=reason % locals()) + def _get_compute_info(self, context, host, key): + """get compute node's infomation specified by key + + :param context: security context + :param host: hostname(must be compute node) + :param key: column name of compute_nodes + :return: value specified by key + + """ + compute_node_ref = db.service_get_all_compute_by_host(context, host) + compute_node_ref = compute_node_ref[0]['compute_node'][0] + return compute_node_ref[key] + def mounted_on_same_shared_storage(self, context, instance_ref, dest): """Check if the src and dest host mount same shared storage. @@ -283,15 +362,13 @@ class Scheduler(object): {"method": 'create_shared_storage_test_file'}) # make sure existence at src host. - rpc.call(context, src_t, - {"method": 'check_shared_storage_test_file', - "args": {'filename': filename}}) + ret = rpc.call(context, src_t, + {"method": 'check_shared_storage_test_file', + "args": {'filename': filename}}) + if not ret: + raise exception.FileNotFound(file_path=filename) - except rpc.RemoteError: - ipath = FLAGS.instances_path - logging.error(_("Cannot confirm tmpfile at %(ipath)s is on " - "same shared storage between %(src)s " - "and %(dest)s.") % locals()) + except exception.FileNotFound: raise finally: diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index c8b16b622..0e395ee79 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -34,12 +34,13 @@ from nova.scheduler import zone_manager LOG = logging.getLogger('nova.scheduler.manager') FLAGS = flags.FLAGS flags.DEFINE_string('scheduler_driver', - 'nova.scheduler.chance.ChanceScheduler', - 'Driver to use for the scheduler') + 'nova.scheduler.multi.MultiScheduler', + 'Default driver to use for the scheduler') class SchedulerManager(manager.Manager): """Chooses a host to run instances on.""" + def __init__(self, scheduler_driver=None, *args, **kwargs): self.zone_manager = zone_manager.ZoneManager() if not scheduler_driver: @@ -71,8 +72,8 @@ class SchedulerManager(manager.Manager): def update_service_capabilities(self, context=None, service_name=None, host=None, capabilities=None): """Process a capability update from a service node.""" - if not capability: - capability = {} + if not capabilities: + capabilities = {} self.zone_manager.update_service_capabilities(service_name, host, capabilities) @@ -113,7 +114,7 @@ class SchedulerManager(manager.Manager): # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin. # Based on bexar design summit discussion, # just put this here for bexar release. - def show_host_resources(self, context, host, *args): + def show_host_resources(self, context, host): """Shows the physical/usage resource given by hosts. :param context: security context @@ -121,43 +122,45 @@ class SchedulerManager(manager.Manager): :returns: example format is below. 
{'resource':D, 'usage':{proj_id1:D, proj_id2:D}} - D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048} + D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048, + 'vcpus_used': 12, 'memory_mb_used': 10240, + 'local_gb_used': 64} """ + # Getting compute node info and related instances info compute_ref = db.service_get_all_compute_by_host(context, host) compute_ref = compute_ref[0] - - # Getting physical resource information - compute_node_ref = compute_ref['compute_node'][0] - resource = {'vcpus': compute_node_ref['vcpus'], - 'memory_mb': compute_node_ref['memory_mb'], - 'local_gb': compute_node_ref['local_gb'], - 'vcpus_used': compute_node_ref['vcpus_used'], - 'memory_mb_used': compute_node_ref['memory_mb_used'], - 'local_gb_used': compute_node_ref['local_gb_used']} - - # Getting usage resource information - usage = {} instance_refs = db.instance_get_all_by_host(context, compute_ref['host']) + + # Getting total available/used resource + compute_ref = compute_ref['compute_node'][0] + resource = {'vcpus': compute_ref['vcpus'], + 'memory_mb': compute_ref['memory_mb'], + 'local_gb': compute_ref['local_gb'], + 'vcpus_used': compute_ref['vcpus_used'], + 'memory_mb_used': compute_ref['memory_mb_used'], + 'local_gb_used': compute_ref['local_gb_used']} + usage = dict() if not instance_refs: return {'resource': resource, 'usage': usage} + # Getting usage resource per project project_ids = [i['project_id'] for i in instance_refs] project_ids = list(set(project_ids)) for project_id in project_ids: - vcpus = db.instance_get_vcpu_sum_by_host_and_project(context, - host, - project_id) - mem = db.instance_get_memory_sum_by_host_and_project(context, - host, - project_id) - hdd = db.instance_get_disk_sum_by_host_and_project(context, - host, - project_id) - usage[project_id] = {'vcpus': int(vcpus), - 'memory_mb': int(mem), - 'local_gb': int(hdd)} + vcpus = [i['vcpus'] for i in instance_refs \ + if i['project_id'] == project_id] + + mem = [i['memory_mb'] for i in instance_refs \ + if i['project_id'] == project_id] + + disk = [i['local_gb'] for i in instance_refs \ + if i['project_id'] == project_id] + + usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus), + 'memory_mb': reduce(lambda x, y: x + y, mem), + 'local_gb': reduce(lambda x, y: x + y, disk)} return {'resource': resource, 'usage': usage} diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py new file mode 100644 index 000000000..b1578033c --- /dev/null +++ b/nova/scheduler/multi.py @@ -0,0 +1,73 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scheduler that allows routing some calls to one driver and others to another. 
+""" + +from nova import flags +from nova import utils +from nova.scheduler import driver + + +FLAGS = flags.FLAGS +flags.DEFINE_string('compute_scheduler_driver', + 'nova.scheduler.chance.ChanceScheduler', + 'Driver to use for scheduling compute calls') +flags.DEFINE_string('volume_scheduler_driver', + 'nova.scheduler.chance.ChanceScheduler', + 'Driver to use for scheduling volume calls') + + +# A mapping of methods to topics so we can figure out which driver to use. +_METHOD_MAP = {'run_instance': 'compute', + 'start_instance': 'compute', + 'create_volume': 'volume'} + + +class MultiScheduler(driver.Scheduler): + """A scheduler that holds multiple sub-schedulers. + + This exists to allow flag-driven composibility of schedulers, allowing + third parties to integrate custom schedulers more easily. + + """ + + def __init__(self): + super(MultiScheduler, self).__init__() + compute_driver = utils.import_object(FLAGS.compute_scheduler_driver) + volume_driver = utils.import_object(FLAGS.volume_scheduler_driver) + + self.drivers = {'compute': compute_driver, + 'volume': volume_driver} + + def __getattr__(self, key): + if not key.startswith('schedule_'): + raise AttributeError(key) + method = key[len('schedule_'):] + if method not in _METHOD_MAP: + raise AttributeError(key) + return getattr(self.drivers[_METHOD_MAP[method]], key) + + def set_zone_manager(self, zone_manager): + for k, v in self.drivers.iteritems(): + v.set_zone_manager(zone_manager) + + def schedule(self, context, topic, *_args, **_kwargs): + return self.drivers[topic].schedule(context, topic, *_args, **_kwargs) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index ab7ae2e54..704d06582 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -116,14 +116,14 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'floating_ips': [{'floating_ip': {'instance_id': 11, - 'ip': '10.10.10.10', - 'fixed_ip': '10.0.0.1', - 'id': 1}}, - {'floating_ip': {'instance_id': None, - 'ip': '10.10.10.11', - 'fixed_ip': None, - 'id': 2}}]} + response = {'floating_ips': [{'instance_id': 11, + 'ip': '10.10.10.10', + 'fixed_ip': '10.0.0.1', + 'id': 1}, + {'instance_id': None, + 'ip': '10.10.10.11', + 'fixed_ip': None, + 'id': 2}]} self.assertEqual(res_dict, response) def test_floating_ip_show(self): @@ -177,8 +177,10 @@ class FloatingIpTest(test.TestCase): self.assertEqual(actual, expected) def test_floating_ip_disassociate(self): + body = dict() req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') req.method = 'POST' + req.body = json.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 089b921eb..8a17ee5b2 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -127,9 +127,7 @@ class ExtensionControllerTest(test.TestCase): "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension", "alias": "FOXNSOX", - "links": [], - }, - ) + "links": []}) def test_list_extensions_xml(self): app = openstack.APIRouterV11() @@ -281,7 +279,7 @@ class ActionExtensionTest(test.TestCase): def test_invalid_action_body(self): body = 
dict(blah=dict(name="test")) # Doesn't exist response = self._send_server_action_request("/servers/1/action", body) - self.assertEqual(501, response.status_int) + self.assertEqual(400, response.status_int) def test_invalid_action(self): body = dict(blah=dict(name="test")) @@ -336,27 +334,18 @@ class ExtensionsXMLSerializerTest(test.TestCase): def test_serialize_extenstion(self): serializer = extensions.ExtensionsXMLSerializer() - data = { - 'extension': { - 'name': 'ext1', - 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0', - 'alias': 'RS-PIE', - 'updated': '2011-01-22T13:25:27-06:00', - 'description': 'Adds the capability to share an image.', - 'links': [ - { - 'rel': 'describedby', - 'type': 'application/pdf', - 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf', - }, - { - 'rel': 'describedby', - 'type': 'application/vnd.sun.wadl+xml', - 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl', - }, - ], - }, - } + data = {'extension': { + 'name': 'ext1', + 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0', + 'alias': 'RS-PIE', + 'updated': '2011-01-22T13:25:27-06:00', + 'description': 'Adds the capability to share an image.', + 'links': [{'rel': 'describedby', + 'type': 'application/pdf', + 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'}, + {'rel': 'describedby', + 'type': 'application/vnd.sun.wadl+xml', + 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}} xml = serializer.serialize(data, 'show') print xml @@ -378,48 +367,30 @@ class ExtensionsXMLSerializerTest(test.TestCase): def test_serialize_extensions(self): serializer = extensions.ExtensionsXMLSerializer() - data = { - "extensions": [ - { - "name": "Public Image Extension", - "namespace": "http://foo.com/api/ext/pie/v1.0", - "alias": "RS-PIE", - "updated": "2011-01-22T13:25:27-06:00", - "description": "Adds the capability to share an image.", - "links": [ - { - "rel": "describedby", + data = {"extensions": [{ + "name": "Public Image Extension", + "namespace": "http://foo.com/api/ext/pie/v1.0", + "alias": "RS-PIE", + "updated": "2011-01-22T13:25:27-06:00", + "description": "Adds the capability to share an image.", + "links": [{"rel": "describedby", "type": "application/pdf", - "href": "http://foo.com/api/ext/cs-pie.pdf", - }, - { - "rel": "describedby", "type": "application/vnd.sun.wadl+xml", - "href": "http://foo.com/api/ext/cs-pie.wadl", - }, - ], - }, - { - "name": "Cloud Block Storage", - "namespace": "http://foo.com/api/ext/cbs/v1.0", - "alias": "RS-CBS", - "updated": "2011-01-12T11:22:33-06:00", - "description": "Allows mounting cloud block storage.", - "links": [ - { - "rel": "describedby", - "type": "application/pdf", - "href": "http://foo.com/api/ext/cs-cbs.pdf", - }, - { - "rel": "describedby", + "href": "http://foo.com/api/ext/cs-pie.pdf"}, + {"rel": "describedby", "type": "application/vnd.sun.wadl+xml", - "href": "http://foo.com/api/ext/cs-cbs.wadl", - }, - ], - }, - ], - } + "href": "http://foo.com/api/ext/cs-pie.wadl"}]}, + {"name": "Cloud Block Storage", + "namespace": "http://foo.com/api/ext/cbs/v1.0", + "alias": "RS-CBS", + "updated": "2011-01-12T11:22:33-06:00", + "description": "Allows mounting cloud block storage.", + "links": [{"rel": "describedby", + "type": "application/pdf", + "href": "http://foo.com/api/ext/cs-cbs.pdf"}, + {"rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]} xml = serializer.serialize(data, 'index') print xml diff --git a/nova/tests/api/openstack/test_limits.py 
b/nova/tests/api/openstack/test_limits.py index 1dc3c3a17..801b06230 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -915,86 +915,56 @@ class LimitsViewBuilderV11Test(test.TestCase): def setUp(self): self.view_builder = views.limits.ViewBuilderV11() - self.rate_limits = [ - { - "URI": "*", - "regex": ".*", - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "resetTime": 1311272226, - }, - { - "URI": "*/servers", - "regex": "^/servers", - "value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "resetTime": 1311272226, - }, - ] - self.absolute_limits = { - "metadata_items": 1, - "injected_files": 5, - "injected_file_content_bytes": 5, - } + self.rate_limits = [{"URI": "*", + "regex": ".*", + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "resetTime": 1311272226}, + {"URI": "*/servers", + "regex": "^/servers", + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "resetTime": 1311272226}] + self.absolute_limits = {"metadata_items": 1, + "injected_files": 5, + "injected_file_content_bytes": 5} def tearDown(self): pass def test_build_limits(self): - expected_limits = { - "limits": { - "rate": [ - { - "uri": "*", - "regex": ".*", - "limit": [ - { - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-07-21T18:17:06Z", - }, - ] - }, - { - "uri": "*/servers", - "regex": "^/servers", - "limit": [ - { - "value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "next-available": "2011-07-21T18:17:06Z", - }, - ] - }, - ], - "absolute": { - "maxServerMeta": 1, - "maxImageMeta": 1, - "maxPersonality": 5, - "maxPersonalitySize": 5 - } - } - } + expected_limits = {"limits": { + "rate": [{ + "uri": "*", + "regex": ".*", + "limit": [{"value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-07-21T18:17:06Z"}]}, + {"uri": "*/servers", + "regex": "^/servers", + "limit": [{"value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-07-21T18:17:06Z"}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 5}}} output = self.view_builder.build(self.rate_limits, self.absolute_limits) self.assertDictMatch(output, expected_limits) def test_build_limits_empty_limits(self): - expected_limits = { - "limits": { - "rate": [], - "absolute": {}, - } - } + expected_limits = {"limits": {"rate": [], + "absolute": {}}} abs_limits = {} rate_limits = [] @@ -1012,45 +982,28 @@ class LimitsXMLSerializationTest(test.TestCase): def test_index(self): serializer = limits.LimitsXMLSerializer() - - fixture = { - "limits": { - "rate": [ - { - "uri": "*", - "regex": ".*", - "limit": [ - { - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z", - }, - ] - }, - { - "uri": "*/servers", - "regex": "^/servers", - "limit": [ - { - "value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "next-available": "2011-12-15T22:42:45Z" - }, - ] - }, - ], - "absolute": { - "maxServerMeta": 1, - "maxImageMeta": 1, - "maxPersonality": 5, - "maxPersonalitySize": 10240 - } - } - } + fixture = {"limits": { + "rate": [{ + "uri": "*", + "regex": ".*", + "limit": [{ + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z"}]}, + {"uri": "*/servers", + "regex": "^/servers", + "limit": [{ + "value": 50, + "verb": "POST", + "remaining": 10, + 
"unit": "DAY", + "next-available": "2011-12-15T22:42:45Z"}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture, 'index') actual = minidom.parseString(output.replace(" ", "")) @@ -1083,12 +1036,9 @@ class LimitsXMLSerializationTest(test.TestCase): def test_index_no_limits(self): serializer = limits.LimitsXMLSerializer() - fixture = { - "limits": { - "rate": [], - "absolute": {}, - } - } + fixture = {"limits": { + "rate": [], + "absolute": {}}} output = serializer.serialize(fixture, 'index') actual = minidom.parseString(output.replace(" ", "")) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 717e11c00..687a19390 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -352,7 +352,7 @@ class ServerActionsTest(test.TestCase): req.body = json.dumps(body) req.headers["content-type"] = "application/json" response = req.get_response(fakes.wsgi_app()) - self.assertEqual(501, response.status_int) + self.assertEqual(400, response.status_int) def test_create_backup_with_metadata(self): self.flags(allow_admin_api=True) @@ -487,6 +487,24 @@ class ServerActionsTestV11(test.TestCase): def tearDown(self): self.stubs.UnsetAll() + def test_server_bad_body(self): + body = {} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_unknown_action(self): + body = {'sockTheFox': {'fakekey': '1234'}} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_server_change_password(self): mock_method = MockSetAdminPassword() self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 1e6552c6a..08eb1eb3c 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -134,8 +134,8 @@ def return_security_group(context, instance_id, security_group_id): pass -def instance_update(context, instance_id, kwargs): - return stub_instance(instance_id) +def instance_update(context, instance_id, values): + return stub_instance(instance_id, name=values.get('display_name')) def instance_addresses(context, instance_id): @@ -145,7 +145,7 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, public_addresses=None, host=None, power_state=0, reservation_id="", uuid=FAKE_UUID, image_ref="10", - flavor_id="1", interfaces=None): + flavor_id="1", interfaces=None, name=None): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -161,7 +161,7 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, host = str(host) # ReservationID isn't sent back, hack it in there. 
- server_name = "server%s" % id + server_name = name or "server%s" % id if reservation_id != "": server_name = "reservation_%s" % (reservation_id, ) @@ -1880,13 +1880,17 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 400) def test_update_server_name_v1_1(self): + self.stubs.Set(nova.db.api, 'instance_get', + return_server_with_attributes(name='server_test')) req = webob.Request.blank('/v1.1/servers/1') req.method = 'PUT' req.content_type = 'application/json' - req.body = json.dumps({'server': {'name': 'new-name'}}) + req.body = json.dumps({'server': {'name': 'server_test'}}) res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 204) - self.assertEqual(res.body, '') + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['name'], 'server_test') def test_update_server_adminPass_ignored_v1_1(self): inst_dict = dict(name='server_test', adminPass='bacon') @@ -1897,16 +1901,19 @@ class ServersTest(test.TestCase): self.assertEqual(params, filtered_dict) return filtered_dict - self.stubs.Set(nova.db.api, 'instance_update', - server_update) + self.stubs.Set(nova.db.api, 'instance_update', server_update) + self.stubs.Set(nova.db.api, 'instance_get', + return_server_with_attributes(name='server_test')) req = webob.Request.blank('/v1.1/servers/1') req.method = 'PUT' req.content_type = "application/json" req.body = self.body res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 204) - self.assertEqual(res.body, '') + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['name'], 'server_test') def test_create_backup_schedules(self): req = webob.Request.blank('/v1.0/servers/1/backup_schedule') @@ -3209,8 +3216,7 @@ class ServersViewBuilderV11Test(test.TestCase): address_builder, flavor_builder, image_builder, - base_url, - ) + base_url) return view_builder def test_build_server(self): diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index d51b19ccd..f2a19f22d 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -32,6 +32,7 @@ class FakeGlance(object): IMAGE_RAMDISK = 3 IMAGE_RAW = 4 IMAGE_VHD = 5 + IMAGE_ISO = 6 IMAGE_FIXTURES = { IMAGE_MACHINE: { @@ -58,6 +59,11 @@ class FakeGlance(object): 'image_meta': {'name': 'fakevhd', 'size': 0, 'disk_format': 'vhd', 'container_format': 'ovf'}, + 'image_data': StringIO.StringIO('')}, + IMAGE_ISO: { + 'image_meta': {'name': 'fakeiso', 'size': 0, + 'disk_format': 'iso', + 'container_format': 'bare'}, 'image_data': StringIO.StringIO('')}} def __init__(self, host, port=None, use_ssl=False, auth_tok=None): diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 7a26fd1bb..33461025f 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -36,8 +36,9 @@ from nova import test from nova import rpc from nova import utils from nova.scheduler import api -from nova.scheduler import manager from nova.scheduler import driver +from nova.scheduler import manager +from nova.scheduler import multi from nova.compute import power_state @@ -391,7 +392,7 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() - def test_wont_sechedule_if_specified_host_is_down_no_queue(self): + def test_wont_schedule_if_specified_host_is_down_no_queue(self): compute1 = 
service.Service('host1', 'nova-compute', 'compute', @@ -643,10 +644,13 @@ class SimpleDriverTestCase(test.TestCase): self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') driver_i._live_migration_src_check(nocare, nocare) - driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) - driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) + driver_i._live_migration_dest_check(nocare, nocare, + i_ref['host'], False) + driver_i._live_migration_common_check(nocare, nocare, + i_ref['host'], False) self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} + kwargs = {'instance_id': instance_id, 'dest': i_ref['host'], + 'block_migration': False} rpc.cast(self.context, db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), {"method": 'live_migration', "args": kwargs}) @@ -654,7 +658,8 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.scheduler.live_migration(self.context, FLAGS.compute_topic, instance_id=instance_id, - dest=i_ref['host']) + dest=i_ref['host'], + block_migration=False) i_ref = db.instance_get(self.context, instance_id) self.assertTrue(i_ref['state_description'] == 'migrating') @@ -735,7 +740,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.ComputeServiceUnavailable, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -748,7 +753,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.UnableToMigrateToSelf, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -756,15 +761,33 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migration_dest_check_service_lack_memory(self): """Confirms exception raises when dest doesn't have enough memory.""" instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + memory_mb=12) i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) + s_ref = self._create_compute_service(host='somewhere') self.assertRaises(exception.MigrationError, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') + self.context, i_ref, 'somewhere', False) db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) + db.service_destroy(self.context, s_ref['id']) + + def test_block_migration_dest_check_service_lack_disk(self): + """Confirms exception raises when dest doesn't have enough disk.""" + instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + local_gb=70) + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere') + + self.assertRaises(exception.MigrationError, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, 'somewhere', True) + + db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) db.service_destroy(self.context, s_ref['id']) def test_live_migration_dest_check_service_works_correctly(self): @@ -776,7 +799,8 @@ class 
SimpleDriverTestCase(test.TestCase): ret = self.scheduler.driver._live_migration_dest_check(self.context, i_ref, - 'somewhere') + 'somewhere', + False) self.assertTrue(ret is None) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -809,9 +833,10 @@ class SimpleDriverTestCase(test.TestCase): "args": {'filename': fpath}}) self.mox.ReplayAll() - self.assertRaises(exception.SourceHostUnavailable, + #self.assertRaises(exception.SourceHostUnavailable, + self.assertRaises(exception.FileNotFound, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -835,7 +860,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.InvalidHypervisorType, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -861,7 +886,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.DestinationHypervisorTooOld, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -893,7 +918,8 @@ class SimpleDriverTestCase(test.TestCase): try: self.scheduler.driver._live_migration_common_check(self.context, i_ref, - dest) + dest, + False) except rpc.RemoteError, e: c = (e.message.find(_("doesn't have compatibility to")) >= 0) @@ -903,6 +929,25 @@ class SimpleDriverTestCase(test.TestCase): db.service_destroy(self.context, s_ref2['id']) +class MultiDriverTestCase(SimpleDriverTestCase): + """Test case for multi driver.""" + + def setUp(self): + super(MultiDriverTestCase, self).setUp() + self.flags(connection_type='fake', + stub_network=True, + max_cores=4, + max_gigabytes=4, + network_manager='nova.network.manager.FlatManager', + volume_driver='nova.volume.driver.FakeISCSIDriver', + compute_scheduler_driver=('nova.scheduler.simple' + '.SimpleScheduler'), + volume_scheduler_driver=('nova.scheduler.simple' + '.SimpleScheduler'), + scheduler_driver='nova.scheduler.multi.MultiScheduler') + self.scheduler = manager.SchedulerManager() + + class FakeZone(object): def __init__(self, id, api_url, username, password): self.id = id diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 73c9bd78d..e2fa3b140 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -632,7 +632,7 @@ class ComputeTestCase(test.TestCase): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.volume_manager = volmock @@ -657,7 +657,7 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.driver = drivermock @@ -714,11 +714,15 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ 
AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}) + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}) + self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.rollback_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() @@ -739,13 +743,18 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}).\ + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, 'host': i_ref['host']}) for v in i_ref['volumes']: dbmock.volume_update(c, v['id'], {'status': 'in-use'}) + # mock for volume_api.remove_from_compute + rpc.call(c, topic, {"method": "remove_volume", + "args": {'volume_id': v['id']}}) self.compute.db = dbmock self.mox.ReplayAll() @@ -766,7 +775,9 @@ class ComputeTestCase(test.TestCase): AndReturn(topic) self.mox.StubOutWithMock(rpc, 'call') rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}).\ + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, @@ -791,11 +802,14 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}) + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}) self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.rollback_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() @@ -829,6 +843,10 @@ class ComputeTestCase(test.TestCase): self.compute.volume_manager.remove_compute_volume(c, v['id']) self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance') self.compute.driver.unfilter_instance(i_ref, []) + self.mox.StubOutWithMock(rpc, 'call') + rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest), + {"method": "post_live_migration_at_destination", + "args": {'instance_id': i_ref['id'], 'block_migration': False}}) # executing self.mox.ReplayAll() diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 2180cf4f0..688518bb8 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -21,6 +21,7 @@ import os import re import shutil import sys +import tempfile from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom @@ -49,18 +50,19 @@ def _create_network_info(count=1, ipv6=None): if ipv6 is None: ipv6 = FLAGS.use_ipv6 fake = 'fake' - fake_ip = '0.0.0.0/0' - fake_ip_2 = '0.0.0.1/0' - fake_ip_3 = '0.0.0.1/0' + fake_ip = '10.11.12.13' + fake_ip_2 = '0.0.0.1' + fake_ip_3 = '0.0.0.1' fake_vlan = 100 fake_bridge_interface = 'eth0' network = {'bridge': fake, 'cidr': fake_ip, 'cidr_v6': fake_ip, + 'gateway_v6': fake, 'vlan': fake_vlan, 
'bridge_interface': fake_bridge_interface} mapping = {'mac': fake, - 'dhcp_server': fake, + 'dhcp_server': '10.0.0.1', 'gateway': fake, 'gateway6': fake, 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} @@ -71,12 +73,12 @@ def _create_network_info(count=1, ipv6=None): return [(network, mapping) for x in xrange(0, count)] -def _setup_networking(instance_id, ip='1.2.3.4'): +def _setup_networking(instance_id, ip='1.2.3.4', mac='56:12:12:12:12:12'): ctxt = context.get_admin_context() network_ref = db.project_get_networks(ctxt, 'fake', associate=True)[0] - vif = {'address': '56:12:12:12:12:12', + vif = {'address': mac, 'network_id': network_ref['id'], 'instance_id': instance_id} vif_ref = db.virtual_interface_create(ctxt, vif) @@ -273,15 +275,14 @@ class LibvirtConnTestCase(test.TestCase): conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, self.test_instance) - result = conn._prepare_xml_info(instance_ref, False) - self.assertFalse(result['nics']) - - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info()) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(), + False) self.assertTrue(len(result['nics']) == 1) - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info(2)) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(2), + False) self.assertTrue(len(result['nics']) == 2) def test_xml_and_uri_no_ramdisk_no_kernel(self): @@ -408,16 +409,16 @@ class LibvirtConnTestCase(test.TestCase): network_info = _create_network_info(2) conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, instance_data) - xml = conn.to_xml(instance_ref, False, network_info) + xml = conn.to_xml(instance_ref, network_info, False) tree = xml_to_tree(xml) interfaces = tree.findall("./devices/interface") self.assertEquals(len(interfaces), 2) parameters = interfaces[0].findall('./filterref/parameter') self.assertEquals(interfaces[0].get('type'), 'bridge') self.assertEquals(parameters[0].get('name'), 'IP') - self.assertEquals(parameters[0].get('value'), '0.0.0.0/0') + self.assertEquals(parameters[0].get('value'), '10.11.12.13') self.assertEquals(parameters[1].get('name'), 'DHCPSERVER') - self.assertEquals(parameters[1].get('value'), 'fake') + self.assertEquals(parameters[1].get('value'), '10.0.0.1') def _check_xml_and_container(self, instance): user_context = context.RequestContext(self.user_id, @@ -431,7 +432,8 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') - xml = conn.to_xml(instance_ref) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info) tree = xml_to_tree(xml) check = [ @@ -528,17 +530,20 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref, rescue) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), expected_result, - '%s failed check %d' % (xml, i)) + '%s != %s failed check %d' % + (check(tree), expected_result, i)) for i, (check, expected_result) in enumerate(common_checks): self.assertEqual(check(tree), expected_result, - '%s failed common check %d' % (xml, i)) + '%s != %s failed common check %d' % + (check(tree), expected_result, i)) # This test is supposed to make sure we don't # override a specifically set uri @@ -623,7 +628,7 @@ class 
LibvirtConnTestCase(test.TestCase): return # Preparing mocks - def fake_none(self): + def fake_none(self, *args): return def fake_raise(self): @@ -640,6 +645,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() instance_ref = db.instance_create(self.context, self.test_instance) + network_info = _create_network_info() # Start test self.mox.ReplayAll() @@ -649,6 +655,7 @@ class LibvirtConnTestCase(test.TestCase): conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('instance_filter_exists', fake_none) conn.ensure_filtering_rules_for_instance(instance_ref, + network_info, time=fake_timer) except exception.Error, e: c1 = (0 <= e.message.find('Timeout migrating for')) @@ -690,17 +697,20 @@ class LibvirtConnTestCase(test.TestCase): return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) - self.mox.StubOutWithMock(self.compute, "recover_live_migration") - self.compute.recover_live_migration(self.context, instance_ref, - dest='dest') - - # Start test +# self.mox.StubOutWithMock(self.compute, "recover_live_migration") + self.mox.StubOutWithMock(self.compute, "rollback_live_migration") +# self.compute.recover_live_migration(self.context, instance_ref, +# dest='dest') + self.compute.rollback_live_migration(self.context, instance_ref, + 'dest', False) + + #start test self.mox.ReplayAll() conn = connection.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn._live_migration, - self.context, instance_ref, 'dest', '', - self.compute.recover_live_migration) + self.context, instance_ref, 'dest', False, + self.compute.rollback_live_migration) instance_ref = db.instance_get(self.context, instance_ref['id']) self.assertTrue(instance_ref['state_description'] == 'running') @@ -711,6 +721,95 @@ class LibvirtConnTestCase(test.TestCase): db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id']) + def test_pre_block_migration_works_correctly(self): + """Confirms pre_block_migration works correctly.""" + + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Replace instances_path since this testcase creates tmpfile + tmpdir = tempfile.mkdtemp() + store = FLAGS.instances_path + FLAGS.instances_path = tmpdir + + # Test data + instance_ref = db.instance_create(self.context, self.test_instance) + dummyjson = '[{"path": "%s/disk", "local_gb": "10G", "type": "raw"}]' + + # Preparing mocks + # qemu-img should be mockd since test environment might not have + # large disk space. 
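The timeout test above drives the retry loop in ensure_filtering_rules_for_instance (shown in the connection.py hunk further down): poll instance_filter_exists a bounded number of times, sleep between attempts, and raise once the budget is exhausted. A standalone re-creation with an injectable sleep, mirroring the test's fake_timer idea; the names are illustrative:

    import time

    def wait_for_filter(filter_exists, retries, sleep=time.sleep):
        # The driver pops a range() down to empty as a countdown;
        # list(range(...)) keeps that working outside Python 2.
        attempts = list(range(retries))
        while attempts:
            if filter_exists():
                return
            attempts.pop()
            if not attempts:
                raise Exception('Timeout migrating for instance')
            sleep(1)

    wait_for_filter(lambda: True, 3)  # filter already exists: returns
    try:
        wait_for_filter(lambda: False, 2, sleep=lambda s: None)
    except Exception as e:
        print(e)
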
+ self.mox.StubOutWithMock(utils, "execute") + utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw', + '%s/%s/disk' % (tmpdir, instance_ref.name), '10G') + + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + conn.pre_block_migration(self.context, instance_ref, + dummyjson % tmpdir) + + self.assertTrue(os.path.exists('%s/%s/' % + (tmpdir, instance_ref.name))) + + shutil.rmtree(tmpdir) + db.instance_destroy(self.context, instance_ref['id']) + # Restore FLAGS.instances_path + FLAGS.instances_path = store + + def test_get_instance_disk_info_works_correctly(self): + """Confirms pre_block_migration works correctly.""" + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Test data + instance_ref = db.instance_create(self.context, self.test_instance) + dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" + "<devices>" + "<disk type='file'><driver name='qemu' type='raw'/>" + "<source file='/test/disk'/>" + "<target dev='vda' bus='virtio'/></disk>" + "<disk type='file'><driver name='qemu' type='qcow2'/>" + "<source file='/test/disk.local'/>" + "<target dev='vdb' bus='virtio'/></disk>" + "</devices></domain>") + + ret = ("image: /test/disk\nfile format: raw\n" + "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n") + + # Preparing mocks + vdmock = self.mox.CreateMock(libvirt.virDomain) + self.mox.StubOutWithMock(vdmock, "XMLDesc") + vdmock.XMLDesc(0).AndReturn(dummyxml) + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return vdmock + self.create_fake_libvirt_mock(lookupByName=fake_lookup) + + self.mox.StubOutWithMock(os.path, "getsize") + # based on above testdata, one is raw image, so getsize is mocked. + os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024) + # another is qcow image, so qemu-img should be mocked. 
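The mocked qemu-img output above encodes the parse that get_instance_disk_info (in the connection.py hunk below) performs: keep the "virtual size" line and take the byte count inside its parentheses. Re-created standalone from the test fixture:

    ret = ("image: /test/disk\nfile format: raw\n"
           "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n")

    # Keep only the "virtual size" line and pull out the byte figure.
    sizes = [line.split('(')[1].split()[0]
             for line in ret.split('\n')
             if line.strip().find('virtual size') >= 0]
    print(int(sizes[0]))  # 21474836480, later rounded up to '20G'
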
+ self.mox.StubOutWithMock(utils, "execute") + utils.execute('sudo', 'qemu-img', 'info', '/test/disk.local').\ + AndReturn((ret, '')) + + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + info = conn.get_instance_disk_info(self.context, instance_ref) + info = utils.loads(info) + + self.assertTrue(info[0]['type'] == 'raw' and + info[1]['type'] == 'qcow2' and + info[0]['path'] == '/test/disk' and + info[1]['path'] == '/test/disk.local' and + info[0]['local_gb'] == '10G' and + info[1]['local_gb'] == '20G') + + db.instance_destroy(self.context, instance_ref['id']) + def test_spawn_with_network_info(self): # Skip if non-libvirt environment if not self.lazy_load_library_exists(): @@ -884,7 +983,11 @@ class IptablesFirewallTestCase(test.TestCase): def test_static_filters(self): instance_ref = self._create_instance_ref() - _setup_networking(instance_ref['id'], self.test_ip) + src_instance_ref = self._create_instance_ref() + src_ip = '10.11.12.14' + src_mac = '56:12:12:12:12:13' + _setup_networking(instance_ref['id'], self.test_ip, src_mac) + _setup_networking(src_instance_ref['id'], src_ip) admin_ctxt = context.get_admin_context() secgroup = db.security_group_create(admin_ctxt, @@ -893,6 +996,12 @@ class IptablesFirewallTestCase(test.TestCase): 'name': 'testgroup', 'description': 'test group'}) + src_secgroup = db.security_group_create(admin_ctxt, + {'user_id': 'fake', + 'project_id': 'fake', + 'name': 'testsourcegroup', + 'description': 'src group'}) + db.security_group_rule_create(admin_ctxt, {'parent_group_id': secgroup['id'], 'protocol': 'icmp', @@ -914,9 +1023,19 @@ class IptablesFirewallTestCase(test.TestCase): 'to_port': 81, 'cidr': '192.168.10.0/24'}) + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'tcp', + 'from_port': 80, + 'to_port': 81, + 'group_id': src_secgroup['id']}) + db.instance_add_security_group(admin_ctxt, instance_ref['id'], secgroup['id']) + db.instance_add_security_group(admin_ctxt, src_instance_ref['id'], + src_secgroup['id']) instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) + src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id']) # self.fw.add_instance(instance_ref) def fake_iptables_execute(*cmd, **kwargs): @@ -942,8 +1061,9 @@ class IptablesFirewallTestCase(test.TestCase): from nova.network import linux_net linux_net.iptables_manager.execute = fake_iptables_execute - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) + network_info = _create_network_info() + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) in_rules = filter(lambda l: not l.startswith('#'), self.in_filter_rules) @@ -969,17 +1089,22 @@ class IptablesFirewallTestCase(test.TestCase): self.assertTrue(security_group_chain, "The security group chain wasn't added") - regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT') + regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP acceptance rule wasn't added") - regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp ' - '--icmp-type 8 -j ACCEPT') + regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8' + ' -s 192.168.11.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP Echo Request acceptance rule wasn't added") - regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport ' - '--dports 80:81 -j ACCEPT') + regex = 
re.compile('-A .* -j ACCEPT -p tcp -m multiport ' + '--dports 80:81 -s %s' % (src_ip,)) + self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, + "TCP port 80/81 acceptance rule wasn't added") + + regex = re.compile('-A .* -j ACCEPT -p tcp ' + '-m multiport --dports 80:81 -s 192.168.10.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "TCP port 80/81 acceptance rule wasn't added") db.instance_destroy(admin_ctxt, instance_ref['id']) @@ -1008,7 +1133,7 @@ class IptablesFirewallTestCase(test.TestCase): ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, network_info) - self.fw.add_filters_for_instance(instance_ref, network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) ipv4 = self.fw.iptables.ipv4['filter'].rules ipv6 = self.fw.iptables.ipv6['filter'].rules ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len @@ -1023,7 +1148,7 @@ class IptablesFirewallTestCase(test.TestCase): self.mox.StubOutWithMock(self.fw, 'add_filters_for_instance', use_mock_anything=True) - self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg()) + self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg()) self.fw.instances[instance_ref['id']] = instance_ref self.mox.ReplayAll() self.fw.do_refresh_security_group_rules("fake") @@ -1043,11 +1168,12 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = self._create_instance_ref() _setup_networking(instance_ref['id'], self.test_ip) - self.fw.setup_basic_filtering(instance_ref) - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) + network_info = _create_network_info() + self.fw.setup_basic_filtering(instance_ref, network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) original_filter_count = len(fakefilter.filters) - self.fw.unfilter_instance(instance_ref) + self.fw.unfilter_instance(instance_ref, network_info) # should undefine just the instance filter self.assertEqual(original_filter_count - len(fakefilter.filters), 1) @@ -1057,14 +1183,14 @@ class IptablesFirewallTestCase(test.TestCase): def test_provider_firewall_rules(self): # setup basic instance data instance_ref = self._create_instance_ref() - nw_info = _create_network_info(1) _setup_networking(instance_ref['id'], self.test_ip) # FRAGILE: peeks at how the firewall names chains chain_name = 'inst-%s' % instance_ref['id'] # create a firewall via setup_basic_filtering like libvirt_conn.spawn # should have a chain with 0 rules - self.fw.setup_basic_filtering(instance_ref, network_info=nw_info) + network_info = _create_network_info(1) + self.fw.setup_basic_filtering(instance_ref, network_info) self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains) rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules if rule.chain == 'provider'] @@ -1094,8 +1220,8 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEqual(2, len(rules)) # create the instance filter and make sure it has a jump rule - self.fw.prepare_instance_filter(instance_ref, network_info=nw_info) - self.fw.apply_instance_filter(instance_ref) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules if rule.chain == chain_name] jump_rules = [rule for rule in inst_rules if '-j' in rule.rule] @@ -1247,7 +1373,7 @@ class NWFilterTestCase(test.TestCase): 
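All of the reworked assertions here share one pattern: capture the generated iptables rules, then scan them with a regex that now expects -j ACCEPT up front. The check, condensed into a self-contained form (the rule string is a fixture matching the test's expectation):

    import re

    out_rules = ['-A sg-chain -j ACCEPT -p tcp -m multiport '
                 '--dports 80:81 -s 10.11.12.14']
    regex = re.compile('-A .* -j ACCEPT -p tcp -m multiport '
                       '--dports 80:81 -s 10.11.12.14')
    # the tests use filter(regex.match, rules); a comprehension is equivalent
    assert [r for r in out_rules if regex.match(r)], \
        "TCP port 80/81 acceptance rule wasn't added"
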
def _ensure_all_called(): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], - '561212121212') + 'fake') secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] for required in [secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing', 'no-ip-spoofing', @@ -1263,9 +1389,10 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - self.fw.setup_basic_filtering(instance) - self.fw.prepare_instance_filter(instance) - self.fw.apply_instance_filter(instance) + network_info = _create_network_info() + self.fw.setup_basic_filtering(instance, network_info) + self.fw.prepare_instance_filter(instance, network_info) + self.fw.apply_instance_filter(instance, network_info) _ensure_all_called() self.teardown_security_group() db.instance_destroy(context.get_admin_context(), instance_ref['id']) @@ -1296,11 +1423,12 @@ class NWFilterTestCase(test.TestCase): instance = db.instance_get(self.context, inst_id) _setup_networking(instance_ref['id'], self.test_ip) - self.fw.setup_basic_filtering(instance) - self.fw.prepare_instance_filter(instance) - self.fw.apply_instance_filter(instance) + network_info = _create_network_info() + self.fw.setup_basic_filtering(instance, network_info) + self.fw.prepare_instance_filter(instance, network_info) + self.fw.apply_instance_filter(instance, network_info) original_filter_count = len(fakefilter.filters) - self.fw.unfilter_instance(instance) + self.fw.unfilter_instance(instance, network_info) # should undefine 2 filters: instance and instance-secgroup self.assertEqual(original_filter_count - len(fakefilter.filters), 2) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 38bb13bed..f5ab5ae5a 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -266,9 +266,12 @@ class VlanNetworkTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') + self.mox.StubOutWithMock(db, 'instance_get') - db.fixed_ip_associate_by_address(mox.IgnoreArg(), - mox.IgnoreArg(), + db.instance_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn({'security_groups': + [{'id': 0}]}) + db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('192.168.0.1') db.fixed_ip_update(mox.IgnoreArg(), @@ -359,6 +362,17 @@ class CommonNetworkTestCase(test.TestCase): return [dict(address='10.0.0.0'), dict(address='10.0.0.1'), dict(address='10.0.0.2')] + def network_get_by_cidr(self, context, cidr): + raise exception.NetworkNotFoundForCidr() + + def network_create_safe(self, context, net): + fakenet = {} + fakenet['id'] = 999 + return fakenet + + def network_get_all(self, context): + raise exception.NoNetworksFound() + def __init__(self): self.db = self.FakeDB() self.deallocate_called = None @@ -366,6 +380,9 @@ class CommonNetworkTestCase(test.TestCase): def deallocate_fixed_ip(self, context, address): self.deallocate_called = address + def fake_create_fixed_ips(self, context, network_id): + return None + def test_remove_fixed_ip_from_instance(self): manager = self.FakeNetworkManager() manager.remove_fixed_ip_from_instance(None, 99, '10.0.0.1') @@ -377,3 +394,165 @@ class CommonNetworkTestCase(test.TestCase): self.assertRaises(exception.FixedIpNotFoundForSpecificInstance, manager.remove_fixed_ip_from_instance, None, 99, 'bad input') + + def test_validate_cidrs(self): + manager = self.FakeNetworkManager() + nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256) + 
self.assertEqual(1, len(nets)) + cidrs = [str(net) for net in nets] + self.assertTrue('192.168.0.0/24' in cidrs) + + def test_validate_cidrs_split_exact_in_half(self): + manager = self.FakeNetworkManager() + nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128) + self.assertEqual(2, len(nets)) + cidrs = [str(net) for net in nets] + self.assertTrue('192.168.0.0/25' in cidrs) + self.assertTrue('192.168.0.128/25' in cidrs) + + def test_validate_cidrs_split_cidr_in_use_middle_of_range(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.2.0/24'}]) + self.mox.ReplayAll() + nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + self.assertEqual(4, len(nets)) + cidrs = [str(net) for net in nets] + exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', + '192.168.4.0/24'] + for exp_cidr in exp_cidrs: + self.assertTrue(exp_cidr in cidrs) + self.assertFalse('192.168.2.0/24' in cidrs) + + def test_validate_cidrs_smaller_subnet_in_use(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.2.9/25'}]) + self.mox.ReplayAll() + # ValueError: requested cidr (192.168.2.0/24) conflicts with + # existing smaller cidr + args = [None, '192.168.2.0/24', 1, 256] + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test_validate_cidrs_split_smaller_cidr_in_use(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.2.0/25'}]) + self.mox.ReplayAll() + nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + self.assertEqual(4, len(nets)) + cidrs = [str(net) for net in nets] + exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', + '192.168.4.0/24'] + for exp_cidr in exp_cidrs: + self.assertTrue(exp_cidr in cidrs) + self.assertFalse('192.168.2.0/24' in cidrs) + + def test_validate_cidrs_split_smaller_cidr_in_use2(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.2.9/29'}]) + self.mox.ReplayAll() + nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32) + self.assertEqual(3, len(nets)) + cidrs = [str(net) for net in nets] + exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27'] + for exp_cidr in exp_cidrs: + self.assertTrue(exp_cidr in cidrs) + self.assertFalse('192.168.2.0/27' in cidrs) + + def test_validate_cidrs_split_all_in_use(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + in_use = [{'id': 1, 'cidr': '192.168.2.9/29'}, + {'id': 2, 'cidr': '192.168.2.64/26'}, + {'id': 3, 'cidr': '192.168.2.128/26'}] + manager.db.network_get_all(ctxt).AndReturn(in_use) + self.mox.ReplayAll() + args = [None, '192.168.2.0/24', 3, 64] + # ValueError: Not enough subnets avail to satisfy requested num_ + # networks - some subnets in requested range already + # in use + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test_validate_cidrs_one_in_use(self): + manager = self.FakeNetworkManager() + args = [None, '192.168.0.0/24', 2, 256] + # ValueError: 
network_size * num_networks exceeds cidr size + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test_validate_cidrs_already_used(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.0.0/24'}]) + self.mox.ReplayAll() + # ValueError: cidr already in use + args = [None, '192.168.0.0/24', 1, 256] + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test_validate_cidrs_too_many(self): + manager = self.FakeNetworkManager() + args = [None, '192.168.0.0/24', 200, 256] + # ValueError: Not enough subnets avail to satisfy requested + # num_networks + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test_validate_cidrs_split_partial(self): + manager = self.FakeNetworkManager() + nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256) + returned_cidrs = [str(net) for net in nets] + self.assertTrue('192.168.0.0/24' in returned_cidrs) + self.assertTrue('192.168.1.0/24' in returned_cidrs) + + def test_validate_cidrs_conflict_existing_supernet(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}] + manager.db.network_get_all(ctxt).AndReturn(fakecidr) + self.mox.ReplayAll() + args = [None, '192.168.0.0/24', 1, 256] + # ValueError: requested cidr (192.168.0.0/24) conflicts + # with existing supernet + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test_create_networks(self): + cidr = '192.168.0.0/24' + manager = self.FakeNetworkManager() + self.stubs.Set(manager, '_create_fixed_ips', + self.fake_create_fixed_ips) + args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, + None] + result = manager.create_networks(*args) + self.assertEqual(manager.create_networks(*args), None) + + def test_create_networks_cidr_already_used(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}] + manager.db.network_get_all(ctxt).AndReturn(fakecidr) + self.mox.ReplayAll() + args = [None, 'foo', '192.168.0.0/24', None, 1, 256, + 'fd00::/48', None, None, None] + self.assertRaises(ValueError, manager.create_networks, *args) + + def test_create_networks_many(self): + cidr = '192.168.0.0/16' + manager = self.FakeNetworkManager() + self.stubs.Set(manager, '_create_fixed_ips', + self.fake_create_fixed_ips) + args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None, + None] + self.assertEqual(manager.create_networks(*args), None) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 1deb5a780..2f0559366 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -519,6 +519,11 @@ class XenAPIVMTestCase(test.TestCase): os_type="windows", architecture="i386") self.check_vm_params_for_windows() + def test_spawn_iso_glance(self): + self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None, + os_type="windows", architecture="i386") + self.check_vm_params_for_windows() + def test_spawn_glance(self): self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, diff --git a/nova/virt/driver.py b/nova/virt/driver.py index df4a66ac2..20af2666d 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -252,7 +252,7 @@ class ComputeDriver(object): # TODO(Vek): Need to pass 
context in for access to auth_token pass - def ensure_filtering_rules_for_instance(self, instance_ref): + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): """Setting up filtering rules and waiting for its completion. To migrate an instance, filtering rules to hypervisors diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 880702af1..dc0628772 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -487,16 +487,16 @@ class FakeConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') - def ensure_filtering_rules_for_instance(self, instance_ref): + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') def live_migration(self, context, instance_ref, dest, - post_method, recover_method): + post_method, recover_method, block_migration=False): """This method is supported only by libvirt.""" return - def unfilter_instance(self, instance_ref, network_info=None): + def unfilter_instance(self, instance_ref, network_info): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 6d043577a..2b17e244a 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -32,7 +32,7 @@ Supports KVM, LXC, QEMU, UML, and XEN. :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). :injected_network_template: Template file for injected network -:allow_project_net_traffic: Whether to allow in project network traffic +:allow_same_net_traffic: Whether to allow in project network traffic """ @@ -96,9 +96,9 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') -flags.DEFINE_bool('allow_project_net_traffic', +flags.DEFINE_bool('allow_same_net_traffic', True, - 'Whether to allow in project network traffic') + 'Whether to allow network traffic from same network') flags.DEFINE_bool('use_cow_images', True, 'Whether to use cow images') @@ -117,6 +117,10 @@ flags.DEFINE_string('live_migration_uri', flags.DEFINE_string('live_migration_flag', "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", 'Define live migration behavior.') +flags.DEFINE_string('block_migration_flag', + "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, " + "VIR_MIGRATE_NON_SHARED_INC", + 'Define block migration behavior.') flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') flags.DEFINE_string('qemu_img', 'qemu-img', @@ -463,18 +467,18 @@ class LibvirtConnection(driver.ComputeDriver): """ virt_dom = self._conn.lookupByName(instance['name']) # NOTE(itoumsn): Use XML delived from the running instance - # instead of using to_xml(instance). This is almost the ultimate - # stupid workaround. + # instead of using to_xml(instance, network_info). This is almost + # the ultimate stupid workaround. xml = virt_dom.XMLDesc(0) # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is # better because we cannot ensure flushing dirty buffers # in the guest OS. But, in case of KVM, shutdown() does not work... 
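Behind the allow_same_net_traffic rename: the flag no longer gates per-project traffic but a blanket ACCEPT per attached network, as the instance_rules hunk further down shows. What that rule generation amounts to, sketched with an inline fixture standing in for FLAGS and network_info:

    allow_same_net_traffic = True  # stands in for FLAGS.allow_same_net_traffic
    network_info = [({'cidr': '10.11.12.0/24'}, {})]

    ipv4_rules = []
    if allow_same_net_traffic:
        for network, _mapping in network_info:
            ipv4_rules.append('-s %s -j ACCEPT' % network['cidr'])
    print(ipv4_rules)  # ['-s 10.11.12.0/24 -j ACCEPT']
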
self.destroy(instance, network_info, cleanup=False) self.plug_vifs(instance, network_info) - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) + self.firewall_driver.setup_basic_filtering(instance, network_info) + self.firewall_driver.prepare_instance_filter(instance, network_info) self._create_new_domain(xml) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_reboot(): """Called at an interval until the VM is running again.""" @@ -531,7 +535,7 @@ class LibvirtConnection(driver.ComputeDriver): """ self.destroy(instance, network_info, cleanup=False) - xml = self.to_xml(instance, rescue=True) + xml = self.to_xml(instance, network_info, rescue=True) rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} @@ -574,9 +578,9 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, context, instance, - network_info=None, block_device_info=None): - xml = self.to_xml(instance, False, network_info=network_info, + def spawn(self, context, instance, network_info, + block_device_info=None): + xml = self.to_xml(instance, network_info, False, block_device_info=block_device_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) @@ -584,7 +588,7 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=block_device_info) domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_boot(): """Called at an interval until the VM is running.""" @@ -727,6 +731,7 @@ class LibvirtConnection(driver.ComputeDriver): If cow is True, it will make a CoW image instead of a copy. """ + if not os.path.exists(target): base_dir = os.path.join(FLAGS.instances_path, '_base') if not os.path.exists(base_dir): @@ -988,14 +993,10 @@ class LibvirtConnection(driver.ComputeDriver): else: raise exception.InvalidDevicePath(path=device_path) - def _prepare_xml_info(self, instance, rescue=False, network_info=None, + def _prepare_xml_info(self, instance, network_info, rescue, block_device_info=None): block_device_mapping = driver.block_device_info_get_mapping( block_device_info) - # TODO(adiantum) remove network_info creation code - # when multinics will be completed - if not network_info: - network_info = netutils.get_network_info(instance) nics = [] for (network, mapping) in network_info: @@ -1082,11 +1083,11 @@ class LibvirtConnection(driver.ComputeDriver): xml_info['disk'] = xml_info['basepath'] + "/disk" return xml_info - def to_xml(self, instance, rescue=False, network_info=None, + def to_xml(self, instance, network_info, rescue=False, block_device_info=None): # TODO(termie): cache? 
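to_xml, continuing just below, renders the domain XML by feeding the dict built in _prepare_xml_info into a Cheetah template via searchList. A tiny self-contained demonstration of that mechanism; the template string here is illustrative, not the real libvirt.xml.template:

    from Cheetah.Template import Template

    xml_info = {'name': 'instance-00000001', 'memory_kb': 2097152}
    tmpl = '<domain><name>$name</name><memory>$memory_kb</memory></domain>'
    print(str(Template(tmpl, searchList=[xml_info])))
    # <domain><name>instance-00000001</name><memory>2097152</memory></domain>
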
LOG.debug(_('instance %s: starting toXML method'), instance['name']) - xml_info = self._prepare_xml_info(instance, rescue, network_info, + xml_info = self._prepare_xml_info(instance, network_info, rescue, block_device_info) xml = str(Template(self.libvirt_xml, searchList=[xml_info])) LOG.debug(_('instance %s: finished toXML method'), instance['name']) @@ -1506,7 +1507,7 @@ class LibvirtConnection(driver.ComputeDriver): return - def ensure_filtering_rules_for_instance(self, instance_ref, + def ensure_filtering_rules_for_instance(self, instance_ref, network_info, time=None): """Setting up filtering rules and waiting for its completion. @@ -1536,14 +1537,15 @@ class LibvirtConnection(driver.ComputeDriver): # If any instances never launch at destination host, # basic-filtering must be set here. - self.firewall_driver.setup_basic_filtering(instance_ref) + self.firewall_driver.setup_basic_filtering(instance_ref, network_info) # setting up n)ova-instance-instance-xx mainly. - self.firewall_driver.prepare_instance_filter(instance_ref) + self.firewall_driver.prepare_instance_filter(instance_ref, network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) while timeout_count: - if self.firewall_driver.instance_filter_exists(instance_ref): + if self.firewall_driver.instance_filter_exists(instance_ref, + network_info): break timeout_count.pop() if len(timeout_count) == 0: @@ -1552,7 +1554,7 @@ class LibvirtConnection(driver.ComputeDriver): time.sleep(1) def live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): + post_method, recover_method, block_migration=False): """Spawning live_migration operation for distributing high-load. :params ctxt: security context @@ -1560,20 +1562,22 @@ class LibvirtConnection(driver.ComputeDriver): nova.db.sqlalchemy.models.Instance object instance object that is migrated. :params dest: destination host + :params block_migration: destination host :params post_method: post operation method. expected nova.compute.manager.post_live_migration. :params recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. + :params block_migration: if true, do block migration. """ greenthread.spawn(self._live_migration, ctxt, instance_ref, dest, - post_method, recover_method) + post_method, recover_method, block_migration) - def _live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): + def _live_migration(self, ctxt, instance_ref, dest, post_method, + recover_method, block_migration=False): """Do live migration. :params ctxt: security context @@ -1592,27 +1596,21 @@ class LibvirtConnection(driver.ComputeDriver): # Do live migration. 
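The try block that follows turns the comma-separated flag string into a libvirt bitmask: look each name up on the libvirt module, then OR the values together with reduce. Demonstrated with stand-in constants (the numeric values happen to match libvirt's enum, but the code only needs them to be distinct bits):

    VIR_MIGRATE_UNDEFINE_SOURCE = 16
    VIR_MIGRATE_PEER2PEER = 2
    VIR_MIGRATE_NON_SHARED_INC = 128

    flag_string = ("VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
                   "VIR_MIGRATE_NON_SHARED_INC")
    flagvals = [globals()[name.strip()] for name in flag_string.split(',')]
    logical_sum = reduce(lambda x, y: x | y, flagvals)  # builtin on Python 2
    print(logical_sum)  # 146
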
try: - flaglist = FLAGS.live_migration_flag.split(',') + if block_migration: + flaglist = FLAGS.block_migration_flag.split(',') + else: + flaglist = FLAGS.live_migration_flag.split(',') flagvals = [getattr(libvirt, x.strip()) for x in flaglist] logical_sum = reduce(lambda x, y: x | y, flagvals) - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance_ref.name) - dom.migrateToURI(FLAGS.live_migration_uri % dest, - logical_sum, - None, - FLAGS.live_migration_bandwidth) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance_ref.name) - dom.migrateToURI(FLAGS.live_migration_uri % dest, - logical_sum, - None, - FLAGS.live_migration_bandwidth) + dom = self._conn.lookupByName(instance_ref.name) + dom.migrateToURI(FLAGS.live_migration_uri % dest, + logical_sum, + None, + FLAGS.live_migration_bandwidth) except Exception: - recover_method(ctxt, instance_ref, dest=dest) + recover_method(ctxt, instance_ref, dest, block_migration) raise # Waiting for completion of live_migration. @@ -1624,11 +1622,150 @@ class LibvirtConnection(driver.ComputeDriver): self.get_info(instance_ref.name)['state'] except exception.NotFound: timer.stop() - post_method(ctxt, instance_ref, dest) + post_method(ctxt, instance_ref, dest, block_migration) timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) + def pre_block_migration(self, ctxt, instance_ref, disk_info_json): + """Preparation block migration. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :params disk_info_json: + json strings specified in get_instance_disk_info + + """ + disk_info = utils.loads(disk_info_json) + + # make instance directory + instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name']) + if os.path.exists(instance_dir): + raise exception.DestinationDiskExists(path=instance_dir) + os.mkdir(instance_dir) + + for info in disk_info: + base = os.path.basename(info['path']) + # Get image type and create empty disk image. + instance_disk = os.path.join(instance_dir, base) + utils.execute('sudo', 'qemu-img', 'create', '-f', info['type'], + instance_disk, info['local_gb']) + + # if image has kernel and ramdisk, just download + # following normal way. + if instance_ref['kernel_id']: + user = manager.AuthManager().get_user(instance_ref['user_id']) + project = manager.AuthManager().get_project( + instance_ref['project_id']) + self._fetch_image(nova_context.get_admin_context(), + os.path.join(instance_dir, 'kernel'), + instance_ref['kernel_id'], + user, + project) + if instance_ref['ramdisk_id']: + self._fetch_image(nova_context.get_admin_context(), + os.path.join(instance_dir, 'ramdisk'), + instance_ref['ramdisk_id'], + user, + project) + + def post_live_migration_at_destination(self, ctxt, + instance_ref, + network_info, + block_migration): + """Post operation of live migration at destination host. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :params network_info: instance network infomation + :params : block_migration: if true, post operation of block_migraiton. + """ + # Define migrated instance, otherwise, suspend/destroy does not work. 
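pre_block_migration, just above, issues one `qemu-img create` per entry in the disk-info document so the destination has same-sized empty images before the block migration streams data in. For the sample payload the commands come out as below; json stands in for nova's utils.loads wrapper and the instance path is illustrative:

    import json
    import os

    disk_info_json = ('[{"path": "/test/disk", "local_gb": "10G",'
                      ' "type": "raw"}]')
    instance_dir = '/var/lib/nova/instances/instance-00000001'

    for info in json.loads(disk_info_json):
        disk = os.path.join(instance_dir, os.path.basename(info['path']))
        print(' '.join(['sudo', 'qemu-img', 'create', '-f', info['type'],
                        disk, info['local_gb']]))
    # sudo qemu-img create -f raw \
    #     /var/lib/nova/instances/instance-00000001/disk 10G
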
+ dom_list = self._conn.listDefinedDomains() + if instance_ref.name not in dom_list: + instance_dir = os.path.join(FLAGS.instances_path, + instance_ref.name) + xml_path = os.path.join(instance_dir, 'libvirt.xml') + # In case of block migration, destination does not have + # libvirt.xml + if not os.path.isfile(xml_path): + xml = self.to_xml(instance_ref, network_info=network_info) + f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+') + f.write(xml) + f.close() + # libvirt.xml should be made by to_xml(), but libvirt + # does not accept to_xml() result, since uuid is not + # included in to_xml() result. + dom = self._lookup_by_name(instance_ref.name) + self._conn.defineXML(dom.XMLDesc(0)) + + def get_instance_disk_info(self, ctxt, instance_ref): + """Preparation block migration. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :return: + json strings with below format. + "[{'path':'disk', 'type':'raw', 'local_gb':'10G'},...]" + + """ + disk_info = [] + + virt_dom = self._lookup_by_name(instance_ref.name) + xml = virt_dom.XMLDesc(0) + doc = libxml2.parseDoc(xml) + disk_nodes = doc.xpathEval('//devices/disk') + path_nodes = doc.xpathEval('//devices/disk/source') + driver_nodes = doc.xpathEval('//devices/disk/driver') + + for cnt, path_node in enumerate(path_nodes): + disk_type = disk_nodes[cnt].get_properties().getContent() + path = path_node.get_properties().getContent() + + if disk_type != 'file': + LOG.debug(_('skipping %(path)s since it looks like volume') % + locals()) + continue + + # In case of libvirt.xml, disk type can be obtained + # by the below statement. + # -> disk_type = driver_nodes[cnt].get_properties().getContent() + # but this xml is generated by kvm, format is slightly different. + disk_type = \ + driver_nodes[cnt].get_properties().get_next().getContent() + if disk_type == 'raw': + size = int(os.path.getsize(path)) + else: + out, err = utils.execute('sudo', 'qemu-img', 'info', path) + size = [i.split('(')[1].split()[0] for i in out.split('\n') + if i.strip().find('virtual size') >= 0] + size = int(size[0]) + + # block migration needs same/larger size of empty image on the + # destination host. since qemu-img creates bit smaller size image + # depending on original image size, fixed value is necessary. + for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2), + ('K', 1024), ('', 1)]: + if size / divisor == 0: + continue + if size % divisor != 0: + size = size / divisor + 1 + else: + size = size / divisor + size = str(size) + unit + break + + disk_info.append({'type': disk_type, 'path': path, + 'local_gb': size}) + + return utils.dumps(disk_info) + def unfilter_instance(self, instance_ref, network_info): """See comments of same method in firewall_driver.""" self.firewall_driver.unfilter_instance(instance_ref, diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 9ce57b6c9..c2f4f91e8 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -40,17 +40,17 @@ except ImportError: class FirewallDriver(object): - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Prepare filters for the instance. 
At this point, the instance isn't running yet.""" raise NotImplementedError() - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Stop filtering instance""" raise NotImplementedError() - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """Apply instance filter. Once this method returns, the instance should be firewalled @@ -60,9 +60,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): """Refresh security group rules from data store Gets called when a rule has been added to or removed from @@ -85,7 +83,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Create rules to block spoofing and allow dhcp. This gets called when spawning an instance, before @@ -94,7 +92,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" raise NotImplementedError() @@ -150,7 +148,7 @@ class NWFilterFirewall(FirewallDriver): self.static_filters_configured = False self.handle_security_groups = False - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter""" pass @@ -189,13 +187,10 @@ class NWFilterFirewall(FirewallDriver): </rule> </filter>''' - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" logging.info('called setup_basic_filtering in nwfilter') - if not network_info: - network_info = netutils.get_network_info(instance) - if self.handle_security_groups: # No point in setting up a filter set that we'll be overriding # anyway. @@ -237,7 +232,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self.nova_base_ipv6_filter) self._define_filter(self.nova_dhcp_filter) self._define_filter(self.nova_ra_filter) - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: self._define_filter(self.nova_project_filter) if FLAGS.use_ipv6: self._define_filter(self.nova_project_filter_v6) @@ -300,10 +295,8 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Clear out the nwfilter rules.""" - if not network_info: - network_info = netutils.get_network_info(instance) instance_name = instance.name for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') @@ -326,16 +319,13 @@ class NWFilterFirewall(FirewallDriver): LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' 'for %(instance_name)s is not found.') % locals()) - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Creates an NWFilter for the given instance. In the process, it makes sure the filters for the provider blocks, security groups, and base filter are all in place. 
""" - if not network_info: - network_info = netutils.get_network_info(instance) - self.refresh_provider_fw_rules() ctxt = context.get_admin_context() @@ -388,7 +378,7 @@ class NWFilterFirewall(FirewallDriver): instance_filter_children = [base_filter, 'nova-provider-rules', instance_secgroup_filter_name] - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: instance_filter_children.append('nova-project') if FLAGS.use_ipv6: instance_filter_children.append('nova-project-v6') @@ -401,9 +391,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self._filter_container(filter_name, filter_children)) - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -500,9 +488,8 @@ class NWFilterFirewall(FirewallDriver): return 'nova-instance-%s' % (instance['name']) return 'nova-instance-%s-%s' % (instance['name'], nic_id) - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" - network_info = netutils.get_network_info(instance) for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') instance_filter_name = self._instance_filter_name(instance, nic_id) @@ -521,6 +508,7 @@ class IptablesFirewallDriver(FirewallDriver): from nova.network import linux_net self.iptables = linux_net.iptables_manager self.instances = {} + self.network_infos = {} self.nwfilter = NWFilterFirewall(kwargs['get_connection']) self.basicly_filtered = False @@ -529,22 +517,22 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up provider rules and basic NWFilter.""" - if not network_info: - network_info = netutils.get_network_info(instance) self.nwfilter.setup_basic_filtering(instance, network_info) if not self.basicly_filtered: LOG.debug(_('iptables firewall: Setup Basic Filtering')) self.refresh_provider_fw_rules() self.basicly_filtered = True - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. 
@@ -529,22 +517,22 @@ class IptablesFirewallDriver(FirewallDriver):
        self.iptables.ipv6['filter'].add_chain('sg-fallback')
        self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')

-    def setup_basic_filtering(self, instance, network_info=None):
+    def setup_basic_filtering(self, instance, network_info):
        """Set up provider rules and basic NWFilter."""
-        if not network_info:
-            network_info = netutils.get_network_info(instance)
        self.nwfilter.setup_basic_filtering(instance, network_info)
        if not self.basicly_filtered:
            LOG.debug(_('iptables firewall: Setup Basic Filtering'))
            self.refresh_provider_fw_rules()
            self.basicly_filtered = True

-    def apply_instance_filter(self, instance):
+    def apply_instance_filter(self, instance, network_info):
        """No-op.

        Everything is done in prepare_instance_filter"""
        pass

-    def unfilter_instance(self, instance, network_info=None):
+    def unfilter_instance(self, instance, network_info):
        if self.instances.pop(instance['id'], None):
+            # NOTE(vish): use the passed info instead of the stored info
+            self.network_infos.pop(instance['id'])
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
            self.nwfilter.unfilter_instance(instance, network_info)
@@ -552,11 +540,10 @@ class IptablesFirewallDriver(FirewallDriver):
            LOG.info(_('Attempted to unfilter instance %s which is not '
                       'filtered'), instance['id'])

-    def prepare_instance_filter(self, instance, network_info=None):
-        if not network_info:
-            network_info = netutils.get_network_info(instance)
+    def prepare_instance_filter(self, instance, network_info):
        self.instances[instance['id']] = instance
-        self.add_filters_for_instance(instance, network_info)
+        self.network_infos[instance['id']] = network_info
+        self.add_filters_for_instance(instance)
        self.iptables.apply()

    def _create_filter(self, ips, chain_name):
@@ -583,7 +570,8 @@ class IptablesFirewallDriver(FirewallDriver):
        for rule in ipv6_rules:
            self.iptables.ipv6['filter'].add_rule(chain_name, rule)

-    def add_filters_for_instance(self, instance, network_info=None):
+    def add_filters_for_instance(self, instance):
+        network_info = self.network_infos[instance['id']]
        chain_name = self._instance_chain_name(instance)
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].add_chain(chain_name)
@@ -601,9 +589,7 @@ class IptablesFirewallDriver(FirewallDriver):
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].remove_chain(chain_name)

-    def instance_rules(self, instance, network_info=None):
-        if not network_info:
-            network_info = netutils.get_network_info(instance)
+    def instance_rules(self, instance, network_info):
        ctxt = context.get_admin_context()

        ipv4_rules = []
@@ -621,14 +607,14 @@ class IptablesFirewallDriver(FirewallDriver):
        ipv4_rules += ['-j $provider']
        ipv6_rules += ['-j $provider']

-        dhcp_servers = [info['gateway'] for (_n, info) in network_info]
+        dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info]

        for dhcp_server in dhcp_servers:
            ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
                              '-j ACCEPT' % (dhcp_server,))

        #Allow project network traffic
-        if FLAGS.allow_project_net_traffic:
+        if FLAGS.allow_same_net_traffic:
            cidrs = [network['cidr'] for (network, _m) in network_info]
            for cidr in cidrs:
                ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
@@ -645,7 +631,7 @@ class IptablesFirewallDriver(FirewallDriver):
                '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))

        #Allow project network traffic
-        if FLAGS.allow_project_net_traffic:
+        if FLAGS.allow_same_net_traffic:
            cidrv6s = [network['cidr_v6'] for (network, _m)
                       in network_info]
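With allow_same_net_traffic enabled (the renamed allow_project_net_traffic flag, reflecting that the blanket ACCEPT now applies per attached network rather than per project), instance_rules() emits one rule per network CIDR. For a network_info entry whose network carried cidr 10.0.0.0/24 and cidr_v6 fd00::/64 (example values only), the generated rule strings would be of the form:

    ipv4_rules.append('-s 10.0.0.0/24 -j ACCEPT')
    ipv6_rules.append('-s fd00::/64 -j ACCEPT')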
@@ -664,11 +650,10 @@ class IptablesFirewallDriver(FirewallDriver):
                LOG.debug(_('Adding security group rule: %r'), rule)

                if not rule.cidr:
-                    # Eventually, a mechanism to grant access for security
-                    # groups will turn up here. It'll use ipsets.
-                    continue
+                    version = 4
+                else:
+                    version = netutils.get_ip_version(rule.cidr)

-                version = netutils.get_ip_version(rule.cidr)
                if version == 4:
                    fw_rules = ipv4_rules
                else:
@@ -678,16 +663,16 @@ class IptablesFirewallDriver(FirewallDriver):
                if version == 6 and rule.protocol == 'icmp':
                    protocol = 'icmpv6'

-                args = ['-p', protocol, '-s', rule.cidr]
+                args = ['-j ACCEPT', '-p', protocol]

-                if rule.protocol in ['udp', 'tcp']:
+                if protocol in ['udp', 'tcp']:
                    if rule.from_port == rule.to_port:
                        args += ['--dport', '%s' % (rule.from_port,)]
                    else:
                        args += ['-m', 'multiport',
                                 '--dports', '%s:%s' % (rule.from_port,
                                                        rule.to_port)]
-                elif rule.protocol == 'icmp':
+                elif protocol == 'icmp':
                    icmp_type = rule.from_port
                    icmp_code = rule.to_port
@@ -706,34 +691,44 @@ class IptablesFirewallDriver(FirewallDriver):
                        args += ['-m', 'icmp6', '--icmpv6-type',
                                 icmp_type_arg]

-                args += ['-j ACCEPT']
-                fw_rules += [' '.join(args)]
-
+                if rule.cidr:
+                    LOG.info('Using cidr %r', rule.cidr)
+                    args += ['-s', rule.cidr]
+                    fw_rules += [' '.join(args)]
+                else:
+                    if rule['grantee_group']:
+                        for instance in rule['grantee_group']['instances']:
+                            LOG.info('instance: %r', instance)
+                            ips = db.instance_get_fixed_addresses(ctxt,
+                                                                  instance['id'])
+                            LOG.info('ips: %r', ips)
+                            for ip in ips:
+                                subrule = args + ['-s %s' % ip]
+                                fw_rules += [' '.join(subrule)]
+
+        LOG.info('Using fw_rules: %r', fw_rules)
        ipv4_rules += ['-j $sg-fallback']
        ipv6_rules += ['-j $sg-fallback']

        return ipv4_rules, ipv6_rules

-    def instance_filter_exists(self, instance):
+    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists"""
-        return self.nwfilter.instance_filter_exists(instance)
+        return self.nwfilter.instance_filter_exists(instance, network_info)

    def refresh_security_group_members(self, security_group):
-        pass
+        self.do_refresh_security_group_rules(security_group)
+        self.iptables.apply()

-    def refresh_security_group_rules(self, security_group, network_info=None):
-        self.do_refresh_security_group_rules(security_group, network_info)
+    def refresh_security_group_rules(self, security_group):
+        self.do_refresh_security_group_rules(security_group)
        self.iptables.apply()

    @utils.synchronized('iptables', external=True)
-    def do_refresh_security_group_rules(self,
-                                        security_group,
-                                        network_info=None):
+    def do_refresh_security_group_rules(self, security_group):
        for instance in self.instances.values():
            self.remove_filters_for_instance(instance)
-            if not network_info:
-                network_info = netutils.get_network_info(instance)
-            self.add_filters_for_instance(instance, network_info)
+            self.add_filters_for_instance(instance)

    def refresh_provider_fw_rules(self):
        """See :class:`FirewallDriver` docs."""
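The grantee_group branch above replaces the old "it'll use ipsets" placeholder: a rule granting access to another security group is now expanded into one per-member ACCEPT rule, using each member instance's fixed IPs. A rough restatement of that expansion as a standalone function (the function name and example values are illustrative):

    def expand_group_rule(args, member_ips):
        """Expand one group-grant rule into per-member iptables rules."""
        return [' '.join(args + ['-s %s' % ip]) for ip in member_ips]

    # expand_group_rule(['-j ACCEPT', '-p', 'tcp', '--dport', '22'],
    #                   ['10.0.0.5', '10.0.0.6'])
    # => ['-j ACCEPT -p tcp --dport 22 -s 10.0.0.5',
    #     '-j ACCEPT -p tcp --dport 22 -s 10.0.0.6']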
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py
index a8e88fc07..6f303072d 100644
--- a/nova/virt/libvirt/netutils.py
+++ b/nova/virt/libvirt/netutils.py
@@ -23,12 +23,7 @@

 import netaddr

-from nova import context
-from nova import db
-from nova import exception
 from nova import flags
-from nova import ipv6
-from nova import utils


 FLAGS = flags.FLAGS
@@ -47,65 +42,3 @@ def get_net_and_prefixlen(cidr):
 def get_ip_version(cidr):
     net = netaddr.IPNetwork(cidr)
     return int(net.version)
-
-
-def get_network_info(instance):
-    # TODO(tr3buchet): this function needs to go away! network info
-    #                  MUST be passed down from compute
-    # TODO(adiantum) If we will keep this function
-    # we should cache network_info
-    admin_context = context.get_admin_context()
-
-    try:
-        fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id'])
-    except exception.FixedIpNotFoundForInstance:
-        fixed_ips = []
-
-    vifs = db.virtual_interface_get_by_instance(admin_context, instance['id'])
-    flavor = db.instance_type_get(admin_context,
-                                  instance['instance_type_id'])
-    network_info = []
-
-    for vif in vifs:
-        network = vif['network']
-
-        # determine which of the instance's IPs belong to this network
-        network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if
-                       fixed_ip['network_id'] == network['id']]
-
-        def ip_dict(ip):
-            return {
-                'ip': ip,
-                'netmask': network['netmask'],
-                'enabled': '1'}
-
-        def ip6_dict():
-            prefix = network['cidr_v6']
-            mac = vif['address']
-            project_id = instance['project_id']
-            return {
-                'ip': ipv6.to_global(prefix, mac, project_id),
-                'netmask': network['netmask_v6'],
-                'enabled': '1'}
-
-        mapping = {
-            'label': network['label'],
-            'gateway': network['gateway'],
-            'broadcast': network['broadcast'],
-            'dhcp_server': network['gateway'],
-            'mac': vif['address'],
-            'rxtx_cap': flavor['rxtx_cap'],
-            'dns': [],
-            'ips': [ip_dict(ip) for ip in network_ips]}
-
-        if network['dns1']:
-            mapping['dns'].append(network['dns1'])
-        if network['dns2']:
-            mapping['dns'].append(network['dns2'])
-
-        if FLAGS.use_ipv6:
-            mapping['ip6s'] = [ip6_dict()]
-            mapping['gateway6'] = network['gateway_v6']
-
-        network_info.append((network, mapping))
-    return network_info
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index e243d4fa0..4cb9abda4 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -44,7 +44,7 @@ class LibvirtBridgeDriver(VIFDriver):
         gateway6 = mapping.get('gateway6')
         mac_id = mapping['mac'].replace(':', '')

-        if FLAGS.allow_project_net_traffic:
+        if FLAGS.allow_same_net_traffic:
             template = "<parameter name=\"%s\" value=\"%s\" />\n"
             net, mask = netutils.get_net_and_mask(network['cidr'])
             values = [("PROJNET", net), ("PROJMASK", mask)]
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index d5ac39473..1aa642e4e 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -194,6 +194,7 @@ def create_local_pifs():
     Do this one per host."""
     for host_ref in _db_content['host'].keys():
         _create_local_pif(host_ref)
+        _create_local_sr_iso(host_ref)


 def create_local_srs():
@@ -222,6 +223,25 @@ def _create_local_sr(host_ref):
     return sr_ref


+def _create_local_sr_iso(host_ref):
+    sr_ref = _create_object(
+        'SR',
+        {'name_label': 'Local storage ISO',
+         'type': 'lvm',
+         'content_type': 'iso',
+         'shared': False,
+         'physical_size': str(1 << 30),
+         'physical_utilisation': str(0),
+         'virtual_allocation': str(0),
+         'other_config': {
+             'i18n-original-value-name_label': 'Local storage ISO',
+             'i18n-key': 'local-storage-iso'},
+         'VDIs': []})
+    pbd_ref = create_pbd('', host_ref, sr_ref, True)
+    _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+    return sr_ref
+
+
 def _create_local_pif(host_ref):
     pif_ref = _create_object('PIF',
                              {'name-label': 'Fake PIF',
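The fake ISO SR added for the tests carries content_type 'iso' and the i18n-key 'local-storage-iso' in other_config because those are exactly the fields the new find_iso_sr() in vm_utils.py (below) filters on. A small predicate capturing that match, as a restatement rather than code from this patch:

    def looks_like_iso_sr(sr_rec):
        """Check an SR record against find_iso_sr's selection criteria."""
        return (sr_rec['content_type'] == 'iso' and
                sr_rec.get('other_config', {}).get('i18n-key') ==
                'local-storage-iso')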
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6c44d53d4..ba5cf4b49 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -77,6 +77,7 @@ class ImageType:
         3 - raw disk image (local SR, NOT partitioned by plugin)
         4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
             linux, HVM assumed for Windows)
+        5 - ISO disk image (local SR, NOT partitioned by plugin)
     """

     KERNEL = 0
@@ -84,14 +85,17 @@ class ImageType:
     DISK = 2
     DISK_RAW = 3
     DISK_VHD = 4
-    _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD)
+    DISK_ISO = 5
+    _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO)

     KERNEL_STR = "kernel"
     RAMDISK_STR = "ramdisk"
     DISK_STR = "os"
     DISK_RAW_STR = "os_raw"
     DISK_VHD_STR = "vhd"
-    _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR)
+    DISK_ISO_STR = "iso"
+    _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
+             DISK_ISO_STR)

     @classmethod
     def to_string(cls, image_type):
@@ -223,6 +227,30 @@ class VMHelper(HelperBase):
         return vbd_ref

     @classmethod
+    def create_cd_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
+        """Create a read-only, CD-type VBD record and return the new
+        VBD reference."""
+        vbd_rec = {}
+        vbd_rec['VM'] = vm_ref
+        vbd_rec['VDI'] = vdi_ref
+        vbd_rec['userdevice'] = str(userdevice)
+        vbd_rec['bootable'] = bootable
+        vbd_rec['mode'] = 'RO'
+        vbd_rec['type'] = 'CD'
+        vbd_rec['unpluggable'] = True
+        vbd_rec['empty'] = False
+        vbd_rec['other_config'] = {}
+        vbd_rec['qos_algorithm_type'] = ''
+        vbd_rec['qos_algorithm_params'] = {}
+        vbd_rec['qos_supported_algorithms'] = []
+        LOG.debug(_('Creating a CDROM-specific VBD for VM %(vm_ref)s,'
+                    ' VDI %(vdi_ref)s ... ') % locals())
+        vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
+        LOG.debug(_('Created a CDROM-specific VBD %(vbd_ref)s '
+                    ' for VM %(vm_ref)s, VDI %(vdi_ref)s.') % locals())
+        return vbd_ref
+
+    @classmethod
     def find_vbd_by_number(cls, session, vm_ref, number):
         """Get the VBD reference from the device number"""
         vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
@@ -368,6 +396,23 @@ class VMHelper(HelperBase):
         session.wait_for_task(task, instance.id)

     @classmethod
+    def fetch_blank_disk(cls, session, instance_type_id):
+        # Size the blank hard drive to suit the machine type:
+        one_gig = 1024 * 1024 * 1024
+        req_type = instance_types.get_instance_type(instance_type_id)
+        req_size = req_type['local_gb']
+
+        LOG.debug("Creating blank HD of size %(req_size)d gigs"
+                  % locals())
+        vdi_size = one_gig * req_size
+
+        LOG.debug("ISO vm create: Looking for the SR")
+        sr_ref = safe_find_sr(session)
+
+        vdi_ref = cls.create_vdi(session, sr_ref, 'blank HD', vdi_size, False)
+        return vdi_ref
+
+    @classmethod
     def fetch_image(cls, context, session, instance_id, image, user_id,
                     project_id, image_type):
         """Fetch image from glance based on image type.
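create_cd_vbd differs from the generic create_vbd only in the hard-coded CD fields: mode 'RO', type 'CD', unpluggable, and not empty. A hypothetical call, mirroring how vmops.py uses it further down in this patch (session, vm_ref, and iso_vdi_ref stand in for values obtained elsewhere):

    # attach an ISO VDI as a bootable CD drive at device 2
    vbd_ref = VMHelper.create_cd_vbd(session=session, vm_ref=vm_ref,
                                     vdi_ref=iso_vdi_ref, userdevice=2,
                                     bootable=True)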
@@ -449,7 +494,12 @@ class VMHelper(HelperBase):
         # DISK restores
         LOG.debug(_("Fetching image %(image)s") % locals())
         LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type))
-        sr_ref = safe_find_sr(session)
+
+        if image_type == ImageType.DISK_ISO:
+            sr_ref = safe_find_iso_sr(session)
+            LOG.debug(_("ISO: Found sr possibly containing the ISO image"))
+        else:
+            sr_ref = safe_find_sr(session)

         glance_client, image_id = nova.image.get_glance_client(image)
         glance_client.set_auth_token(getattr(context, 'auth_token', None))
@@ -527,7 +577,8 @@ class VMHelper(HelperBase):
             ImageType.RAMDISK: 'RAMDISK',
             ImageType.DISK: 'DISK',
             ImageType.DISK_RAW: 'DISK_RAW',
-            ImageType.DISK_VHD: 'DISK_VHD'}
+            ImageType.DISK_VHD: 'DISK_VHD',
+            ImageType.DISK_ISO: 'DISK_ISO'}
         disk_format = pretty_format[image_type]
         image_ref = instance.image_ref
         instance_id = instance.id
@@ -540,7 +591,8 @@ class VMHelper(HelperBase):
                 'aki': ImageType.KERNEL,
                 'ari': ImageType.RAMDISK,
                 'raw': ImageType.DISK_RAW,
-                'vhd': ImageType.DISK_VHD}
+                'vhd': ImageType.DISK_VHD,
+                'iso': ImageType.DISK_ISO}
         image_ref = instance.image_ref
         glance_client, image_id = nova.image.get_glance_client(image_ref)
         meta = glance_client.get_image_meta(image_id)
@@ -574,6 +626,8 @@ class VMHelper(HelperBase):
                available

             3. Glance (DISK): pv is assumed
+
+            4. Glance (DISK_ISO): no pv is assumed
         """
         LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)

@@ -589,6 +643,9 @@ class VMHelper(HelperBase):
         elif disk_image_type == ImageType.DISK:
             # 3. Disk
             is_pv = True
+        elif disk_image_type == ImageType.DISK_ISO:
+            # 4. ISO
+            is_pv = False
         else:
             raise exception.Error(_("Unknown image format %(disk_image_type)s")
                                   % locals())
@@ -832,6 +889,48 @@ def find_sr(session):
     return None


+def safe_find_iso_sr(session):
+    """Same as find_iso_sr except raises a NotFound exception if the SR
+    cannot be determined
+    """
+    sr_ref = find_iso_sr(session)
+    if sr_ref is None:
+        raise exception.NotFound(_('Cannot find SR of content-type ISO'))
+    return sr_ref
+
+
+def find_iso_sr(session):
+    """Return the storage repository to hold ISO images"""
+    host = session.get_xenapi_host()
+    sr_refs = session.get_xenapi().SR.get_all()
+    for sr_ref in sr_refs:
+        sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+
+        LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
+        if not sr_rec['content_type'] == 'iso':
+            LOG.debug(_("ISO: not iso content"))
+            continue
+        if not 'i18n-key' in sr_rec['other_config']:
+            LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
+            continue
+        if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
+            LOG.debug(_("ISO: iso content_type, i18n-key value not "
+                        "'local-storage-iso'"))
+            continue
+
+        LOG.debug(_("ISO: SR MATCHing our criteria"))
+        for pbd_ref in sr_rec['PBDs']:
+            LOG.debug(_("ISO: ISO, looking to see if it is host local"))
+            pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
+            pbd_rec_host = pbd_rec['host']
+            LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, have %(host)s") %
+                      locals())
+            if pbd_rec_host == host:
+                LOG.debug(_("ISO: SR with local PBD"))
+                return sr_ref
+    return None
+
+
 def remap_vbd_dev(dev):
     """Return the appropriate location for a plugged-in VBD device
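find_iso_sr() accepts an SR only if all three checks pass: content_type is 'iso', other_config carries the i18n-key 'local-storage-iso', and at least one of its PBDs is plugged on the current host. A pure-function restatement of that selection, assuming the SR and PBD records have already been fetched into dicts (simplified inputs, not the XenAPI call sequence):

    def select_iso_sr(sr_records, pbd_records, host):
        """Pick the host-local ISO SR, or None if no SR qualifies."""
        for sr_ref, sr_rec in sr_records.items():
            if sr_rec['content_type'] != 'iso':
                continue
            if sr_rec['other_config'].get('i18n-key') != 'local-storage-iso':
                continue
            for pbd_ref in sr_rec['PBDs']:
                if pbd_records[pbd_ref]['host'] == host:
                    return sr_ref
        return None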
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b1522729a..1fefd1291 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -235,12 +235,51 @@ class VMOps(object):
             raise vm_create_error

-        VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
-                vdi_ref=first_vdi_ref, userdevice=0, bootable=True)
+        # Add disks to VM
+        self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref,
+                           vdis)
+
+        # Alter the image before VM start for, e.g. network injection
+        if FLAGS.flat_injected:
+            VMHelper.preconfigure_instance(self._session, instance,
+                                           first_vdi_ref, network_info)
+
+        self.create_vifs(vm_ref, instance, network_info)
+        self.inject_network_info(instance, network_info, vm_ref)
+        return vm_ref
+
+    def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref,
+                      vdis):
+        # device 0 reserved for RW disk
+        userdevice = 0
+
+        # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
+        if disk_image_type == ImageType.DISK_ISO:
+            LOG.debug("detected ISO image type, going to create blank VM "
+                      "for install")
+
+            cd_vdi_ref = first_vdi_ref
+            first_vdi_ref = VMHelper.fetch_blank_disk(session=self._session,
+                    instance_type_id=instance.instance_type_id)
+
+            VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+                    vdi_ref=first_vdi_ref, userdevice=userdevice,
+                    bootable=False)
+
+            # device 1 reserved for rescue disk and we've used '0'
+            userdevice = 2
+            VMHelper.create_cd_vbd(session=self._session, vm_ref=vm_ref,
+                    vdi_ref=cd_vdi_ref, userdevice=userdevice, bootable=True)
+
+            # set user device to next free value
+            userdevice += 1
+        else:
+            VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+                    vdi_ref=first_vdi_ref, userdevice=userdevice,
+                    bootable=True)
+            # set user device to next free value
+            # userdevice 1 is reserved for rescue and we've used '0'
+            userdevice = 2

         # Attach any other disks
-        # userdevice 1 is reserved for rescue
-        userdevice = 2
         for vdi in vdis[1:]:
             # vdi['vdi_type'] is either 'os' or 'swap', but we don't
             # really care what it is right here.
@@ -251,15 +290,6 @@ class VMOps(object):
                               bootable=False)
             userdevice += 1

-        # Alter the image before VM start for, e.g. network injection
-        if FLAGS.flat_injected:
-            VMHelper.preconfigure_instance(self._session, instance,
-                                           first_vdi_ref, network_info)
-
-        self.create_vifs(vm_ref, instance, network_info)
-        self.inject_network_info(instance, network_info, vm_ref)
-        return vm_ref
-
     def _spawn(self, instance, vm_ref):
         """Spawn a new instance."""
         LOG.debug(_('Starting VM %s...'), vm_ref)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 76b6c57fc..0d23e7689 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -309,12 +309,12 @@ class XenAPIConnection(driver.ComputeDriver):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')

-    def ensure_filtering_rules_for_instance(self, instance_ref):
+    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
         """This method is supported only by libvirt."""
         return

     def live_migration(self, context, instance_ref, dest,
-                       post_method, recover_method):
+                       post_method, recover_method, block_migration=False):
         """This method is supported only by libvirt."""
         return
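For reference, a sketch of the VBD device layout _attach_disks() ends up producing; device 1 is always left free for the rescue disk, and the helper name here is illustrative:

    def plan_userdevices(is_iso, extra_vdi_count):
        """Map userdevice numbers to disk roles, per _attach_disks."""
        if is_iso:
            layout = {0: 'blank RW disk', 2: 'ISO (CD, bootable)'}
            next_free = 3
        else:
            layout = {0: 'root disk (bootable)'}
            next_free = 2  # device 1 is reserved for rescue
        for i in range(extra_vdi_count):
            layout[next_free + i] = 'extra vdi %d (e.g. swap)' % i
        return layout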
