author    Tushar Patil <tushar.vitthal.patil@gmail.com>  2011-08-16 16:18:48 -0700
committer Tushar Patil <tushar.vitthal.patil@gmail.com>  2011-08-16 16:18:48 -0700
commit    982427040554d3cfcee25abab981215f73650b3e (patch)
tree      17fdd26910568059637e6b203f3284d64402579a
parent    9081e8b62ea01828238ecaebdcf3e627ada3fe9a (diff)
parent    6dbcc60d5f8d4995a706f0de449756ecea4ebaa0 (diff)
Resolved conflicts and merged with trunk
-rwxr-xr-x  bin/nova-dhcpbridge | 12
-rwxr-xr-x  bin/nova-manage | 5
-rw-r--r--  nova/api/ec2/cloud.py | 2
-rw-r--r--  nova/api/openstack/contrib/quotas.py | 100
-rw-r--r--  nova/compute/manager.py | 64
-rw-r--r--  nova/db/sqlalchemy/api.py | 21
-rw-r--r--  nova/exception.py | 4
-rw-r--r--  nova/image/s3.py | 26
-rw-r--r--  nova/network/linux_net.py | 358
-rw-r--r--  nova/network/manager.py | 197
-rw-r--r--  nova/scheduler/abstract_scheduler.py (renamed from nova/scheduler/zone_aware_scheduler.py) | 32
-rw-r--r--  nova/scheduler/host_filter.py | 51
-rw-r--r--  nova/scheduler/least_cost.py | 7
-rw-r--r--  nova/tests/api/openstack/contrib/test_quotas.py | 152
-rw-r--r--  nova/tests/api/openstack/test_extensions.py | 24
-rw-r--r--  nova/tests/scheduler/test_abstract_scheduler.py (renamed from nova/tests/scheduler/test_zone_aware_scheduler.py) | 40
-rw-r--r--  nova/tests/scheduler/test_least_cost_scheduler.py | 4
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 4
-rw-r--r--  nova/tests/test_compute.py | 63
-rw-r--r--  nova/tests/test_metadata.py | 2
-rw-r--r--  nova/tests/test_network.py | 72
-rw-r--r--  nova/tests/xenapi/stubs.py | 4
-rw-r--r--  nova/virt/libvirt/connection.py | 5
-rw-r--r--  nova/virt/libvirt/vif.py | 6
-rw-r--r--  nova/virt/xenapi/fake.py | 1
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 75
-rw-r--r--  nova/virt/xenapi/vmops.py | 6
-rw-r--r--  smoketests/test_netadmin.py | 19
28 files changed, 978 insertions(+), 378 deletions(-)
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 621222d8f..a47ea7a76 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -48,7 +48,6 @@ flags.DECLARE('auth_driver', 'nova.auth.manager')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
-flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface')
LOG = logging.getLogger('nova.dhcpbridge')
@@ -87,10 +86,10 @@ def del_lease(mac, ip_address, _interface):
"args": {"address": ip_address}})
-def init_leases(interface):
- """Get the list of hosts for an interface."""
+def init_leases(network_id):
+ """Get the list of hosts for a network."""
ctxt = context.get_admin_context()
- network_ref = db.network_get_by_bridge(ctxt, interface)
+ network_ref = db.network_get(ctxt, network_id)
return linux_net.get_dhcp_leases(ctxt, network_ref)
@@ -101,7 +100,8 @@ def main():
argv = FLAGS(sys.argv)
logging.setup()
# check ENV first so we don't break any older deploys
- interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
+ network_id = int(os.environ.get('NETWORK_ID'))
+
if int(os.environ.get('TESTING', '0')):
from nova.tests import fake_flags
@@ -120,7 +120,7 @@ def main():
LOG.debug(msg)
globals()[action + '_lease'](mac, ip, interface)
else:
- print init_leases(interface)
+ print init_leases(network_id)
if __name__ == "__main__":
main()
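Note for deployers: the bridge script now reads NETWORK_ID instead of DNSMASQ_INTERFACE (the update_dhcp() change in nova/network/linux_net.py below exports it when spawning dnsmasq). Since int(os.environ.get('NETWORK_ID')) raises TypeError when the variable is absent, a defensive read might look like this sketch (not part of the patch):

    import os

    network_id_str = os.environ.get('NETWORK_ID')
    if network_id_str is None:
        # dnsmasq did not pass the variable; fail with a clear message
        raise SystemExit('NETWORK_ID not set in the environment')
    network_id = int(network_id_str)
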
diff --git a/bin/nova-manage b/bin/nova-manage
index 9592d5132..62504f827 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -719,7 +719,7 @@ class NetworkCommands(object):
# sanitize other input using FLAGS if necessary
if not num_networks:
num_networks = FLAGS.num_networks
- if not network_size:
+ if not network_size and fixed_range_v4:
fixnet = netaddr.IPNetwork(fixed_range_v4)
each_subnet_size = fixnet.size / int(num_networks)
if each_subnet_size > FLAGS.network_size:
@@ -741,6 +741,9 @@ class NetworkCommands(object):
if not dns1 and FLAGS.flat_network_dns:
dns1 = FLAGS.flat_network_dns
+ if not network_size:
+ network_size = FLAGS.network_size
+
# create the network
net_manager = utils.import_object(FLAGS.network_manager)
net_manager.create_networks(context.get_admin_context(),
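To illustrate the sizing logic above: a per-network size is derived from the v4 range only when one is given, and network_size otherwise falls back to FLAGS.network_size. A worked example with illustrative values:

    import netaddr

    fixed_range_v4 = '10.0.0.0/16'     # illustrative
    num_networks = 4
    fixnet = netaddr.IPNetwork(fixed_range_v4)
    each_subnet_size = fixnet.size / int(num_networks)   # 65536 / 4 = 16384
    # if each_subnet_size > FLAGS.network_size, nova-manage warns and uses
    # FLAGS.network_size; with no fixed_range_v4 it now falls back directly
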
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 87bba58c3..9aebf92e3 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -305,7 +305,7 @@ class CloudController(object):
'hostname': hostname,
'instance-action': 'none',
'instance-id': ec2_id,
- 'instance-type': instance_ref['instance_type'],
+ 'instance-type': instance_ref['instance_type']['name'],
'local-hostname': hostname,
'local-ipv4': address,
'placement': {'availability-zone': availability_zone},
diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py
new file mode 100644
index 000000000..459b71dfd
--- /dev/null
+++ b/nova/api/openstack/contrib/quotas.py
@@ -0,0 +1,100 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova import db
+from nova import exception
+from nova import quota
+from nova.api.openstack import extensions
+
+
+class QuotaSetsController(object):
+
+ def _format_quota_set(self, project_id, quota_set):
+ """Convert the quota object to a result dict"""
+
+ return {'quota_set': {
+ 'id': str(project_id),
+ 'metadata_items': quota_set['metadata_items'],
+ 'injected_file_content_bytes':
+ quota_set['injected_file_content_bytes'],
+ 'volumes': quota_set['volumes'],
+ 'gigabytes': quota_set['gigabytes'],
+ 'ram': quota_set['ram'],
+ 'floating_ips': quota_set['floating_ips'],
+ 'instances': quota_set['instances'],
+ 'injected_files': quota_set['injected_files'],
+ 'cores': quota_set['cores'],
+ }}
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ try:
+ db.sqlalchemy.api.authorize_project_context(context, id)
+ return self._format_quota_set(id,
+ quota.get_project_quotas(context, id))
+ except exception.NotAuthorized:
+ return webob.Response(status_int=403)
+
+ def update(self, req, id, body):
+ context = req.environ['nova.context']
+ project_id = id
+ resources = ['metadata_items', 'injected_file_content_bytes',
+ 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances',
+ 'injected_files', 'cores']
+ for key in body['quota_set'].keys():
+ if key in resources:
+ value = int(body['quota_set'][key])
+ try:
+ db.quota_update(context, project_id, key, value)
+ except exception.ProjectQuotaNotFound:
+ db.quota_create(context, project_id, key, value)
+ except exception.AdminRequired:
+ return webob.Response(status_int=403)
+ return {'quota_set': quota.get_project_quotas(context, project_id)}
+
+ def defaults(self, req, id):
+ return self._format_quota_set(id, quota._get_default_quotas())
+
+
+class Quotas(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "Quotas"
+
+ def get_alias(self):
+ return "os-quota-sets"
+
+ def get_description(self):
+ return "Quotas management support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/quotas-sets/api/v1.1"
+
+ def get_updated(self):
+ return "2011-08-08T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ res = extensions.ResourceExtension('os-quota-sets',
+ QuotaSetsController(),
+ member_actions={'defaults': 'GET'})
+ resources.append(res)
+
+ return resources
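For reference, the extension maps to GET /v1.1/os-quota-sets/<tenant_id>, GET /v1.1/os-quota-sets/<tenant_id>/defaults, and PUT /v1.1/os-quota-sets/<tenant_id>. A sketch of a PUT body the controller accepts (tenant id and values illustrative; compare the tests added below):

    # PUT /v1.1/os-quota-sets/some_tenant
    body = {'quota_set': {
        'instances': 50,
        'cores': 50,
        # keys outside the resources list above are silently ignored;
        # known keys go through db.quota_update(), falling back to
        # db.quota_create() when no per-project row exists yet
    }}
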
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d9ca31f60..f3260486a 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -323,11 +323,72 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
+ def _check_image_size():
+ """Ensure image is smaller than the maximum size allowed by the
+ instance_type.
+
+ The image stored in Glance is potentially compressed, so we use two
+ checks to ensure that the size isn't exceeded:
+
+ 1) This one - checks compressed size; this is a quick check to
+ eliminate any images which are obviously too large
+
+ 2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
+ is a slower check since it requires uncompressing the entire
+ image, but is accurate because it reflects the image's
+ actual size.
+ """
+ # NOTE(jk0): image_ref is defined in the DB model, image_href is
+ # used by the image service. This should be refactored to be
+ # consistent.
+ image_href = instance['image_ref']
+ image_service, image_id = nova.image.get_image_service(image_href)
+ image_meta = image_service.show(context, image_id)
+
+ try:
+ size_bytes = image_meta['size']
+ except KeyError:
+ # Size is not a required field in the image service (yet), so
+ # we are unable to rely on it being there even though it's in
+ # glance.
+
+ # TODO(jk0): Should size be required in the image service?
+ return
+
+ instance_type_id = instance['instance_type_id']
+ instance_type = self.db.instance_type_get(context,
+ instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+
+ # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
+ # need to handle potential situations where local_gb is 0. This is
+ # the default for m1.tiny.
+ if allowed_size_gb == 0:
+ return
+
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+ LOG.debug(_("image_id=%(image_id)d, image_size_bytes="
+ "%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image '%(image_id)d' size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
+
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
+
requested_networks = kwargs.get('requested_networks', None)
+
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
+
+ _check_image_size()
+
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
updates = {}
@@ -1317,7 +1378,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# This nwfilter is necessary on the destination host.
# In addition, this method is creating filtering rule
# onto destination host.
- self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info)
+ self.driver.ensure_filtering_rules_for_instance(instance_ref,
+ network_info)
# Preparation for block migration
if block_migration:
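A worked sketch of the size arithmetic in _check_image_size(), with illustrative numbers (the authoritative uncompressed-size check is the one in nova/virt/xenapi/vm_utils.py):

    allowed_size_gb = 20                  # instance_type['local_gb']
    size_bytes = 25 * 1024 ** 3           # image_meta['size'] from Glance
    allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
    if allowed_size_gb and size_bytes > allowed_size_bytes:
        # m1.tiny has local_gb == 0, so the check is skipped entirely
        print 'ImageTooLarge: %d > %d' % (size_bytes, allowed_size_bytes)
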
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 21eb85b2c..2f9cab1ab 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1212,6 +1212,19 @@ def instance_get_all_by_filters(context, filters):
return True
return False
+ def _regexp_filter_by_metadata(instance, meta):
+ inst_metadata = [{node['key']: node['value']} \
+ for node in instance['metadata']]
+ if isinstance(meta, list):
+ for node in meta:
+ if node not in inst_metadata:
+ return False
+ elif isinstance(meta, dict):
+ for k, v in meta.iteritems():
+ if {k: v} not in inst_metadata:
+ return False
+ return True
+
def _regexp_filter_by_column(instance, filter_name, filter_re):
try:
v = getattr(instance, filter_name)
@@ -1270,7 +1283,9 @@ def instance_get_all_by_filters(context, filters):
query_prefix = _exact_match_filter(query_prefix, filter_name,
filters.pop(filter_name))
- instances = query_prefix.all()
+ instances = query_prefix.\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
if not instances:
return []
@@ -1286,6 +1301,9 @@ def instance_get_all_by_filters(context, filters):
filter_re = re.compile(str(filters[filter_name]))
if filter_func:
filter_l = lambda instance: filter_func(instance, filter_re)
+ elif filter_name == 'metadata':
+ filter_l = lambda instance: _regexp_filter_by_metadata(instance,
+ filters[filter_name])
else:
filter_l = lambda instance: _regexp_filter_by_column(instance,
filter_name, filter_re)
@@ -2021,6 +2039,7 @@ def quota_get(context, project_id, resource, session=None):
@require_context
def quota_get_all_by_project(context, project_id):
+ authorize_project_context(context, project_id)
session = get_session()
result = {'project_id': project_id}
rows = session.query(models.Quota).\
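Despite living among the regexp filters, _regexp_filter_by_metadata above does exact matching: instance metadata rows are flattened into single-pair dicts and every requested pair must be present. Illustrative behavior:

    inst_metadata = [{'key1': 'value1'}, {'key2': 'value2'}]  # flattened rows
    # dict form: all pairs must match
    assert {'key1': 'value1'} in inst_metadata
    assert {'key1': 'other'} not in inst_metadata
    # list form works the same way, one single-pair dict per element
    meta_filter = [{'key2': 'value2'}]
    assert all(node in inst_metadata for node in meta_filter)
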
diff --git a/nova/exception.py b/nova/exception.py
index c68c89cad..97275fa25 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -756,3 +756,7 @@ class CannotResizeToSameSize(NovaException):
class CannotResizeToSmallerSize(NovaException):
message = _("Resizing to a smaller size is not supported.")
+
+
+class ImageTooLarge(NovaException):
+ message = _("Image is larger than instance type allows")
diff --git a/nova/image/s3.py b/nova/image/s3.py
index ccbfa89cd..abf01a942 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -193,6 +193,8 @@ class S3ImageService(service.BaseImageService):
def delayed_create():
"""This handles the fetching and decrypting of the part files."""
+ log_vars = {'image_location': image_location,
+ 'image_path': image_path}
metadata['properties']['image_state'] = 'downloading'
self.service.update(context, image_id, metadata)
@@ -213,11 +215,11 @@ class S3ImageService(service.BaseImageService):
shutil.copyfileobj(part, combined)
except Exception:
- LOG.error(_("Failed to download %(image_location)s "
- "to %(image_path)s"), locals())
+ LOG.exception(_("Failed to download %(image_location)s "
+ "to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_download'
self.service.update(context, image_id, metadata)
- raise
+ return
metadata['properties']['image_state'] = 'decrypting'
self.service.update(context, image_id, metadata)
@@ -237,11 +239,11 @@ class S3ImageService(service.BaseImageService):
encrypted_iv, cloud_pk,
dec_filename)
except Exception:
- LOG.error(_("Failed to decrypt %(image_location)s "
- "to %(image_path)s"), locals())
+ LOG.exception(_("Failed to decrypt %(image_location)s "
+ "to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_decrypt'
self.service.update(context, image_id, metadata)
- raise
+ return
metadata['properties']['image_state'] = 'untarring'
self.service.update(context, image_id, metadata)
@@ -249,11 +251,11 @@ class S3ImageService(service.BaseImageService):
try:
unz_filename = self._untarzip_image(image_path, dec_filename)
except Exception:
- LOG.error(_("Failed to untar %(image_location)s "
- "to %(image_path)s"), locals())
+ LOG.exception(_("Failed to untar %(image_location)s "
+ "to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_untar'
self.service.update(context, image_id, metadata)
- raise
+ return
metadata['properties']['image_state'] = 'uploading'
self.service.update(context, image_id, metadata)
@@ -262,11 +264,11 @@ class S3ImageService(service.BaseImageService):
self.service.update(context, image_id,
metadata, image_file)
except Exception:
- LOG.error(_("Failed to upload %(image_location)s "
- "to %(image_path)s"), locals())
+ LOG.exception(_("Failed to upload %(image_location)s "
+ "to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_upload'
self.service.update(context, image_id, metadata)
- raise
+ return
metadata['properties']['image_state'] = 'available'
metadata['status'] = 'active'
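The switch from LOG.error to LOG.exception above preserves the traceback that was previously lost, and returning instead of re-raising lets the failed_* image state stand as the error record rather than killing the greenthread. A self-contained illustration of the logging difference (values illustrative):

    import logging
    logging.basicConfig()
    LOG = logging.getLogger('example')
    log_vars = {'image_location': 'bucket/image.manifest.xml',
                'image_path': '/tmp/part'}
    try:
        raise IOError('connection reset')
    except Exception:
        # LOG.exception == LOG.error plus the current traceback appended
        LOG.exception('Failed to download %(image_location)s '
                      'to %(image_path)s', log_vars)
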
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 4e1e1f85a..57c1d0c28 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -63,6 +63,11 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
'dmz range that should be accepted')
flags.DEFINE_string('dnsmasq_config_file', "",
'Override the default dnsmasq settings with this file')
+flags.DEFINE_string('linuxnet_interface_driver',
+ 'nova.network.linux_net.LinuxBridgeInterfaceDriver',
+ 'Driver used to create ethernet devices.')
+flags.DEFINE_string('linuxnet_ovs_integration_bridge',
+ 'br-int', 'Name of Open vSwitch bridge used with linuxnet')
binary_name = os.path.basename(inspect.stack()[-1][1])
@@ -414,7 +419,7 @@ def ensure_metadata_ip():
run_as_root=True, check_exit_code=False)
-def ensure_vlan_forward(public_ip, port, private_ip):
+def ensure_vpn_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
@@ -452,111 +457,34 @@ def floating_forward_rules(floating_ip, fixed_ip):
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
-def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
- """Create a vlan and bridge unless they already exist."""
- interface = ensure_vlan(vlan_num, bridge_interface)
- ensure_bridge(bridge, interface, net_attrs)
- return interface
+def initialize_gateway_device(dev, network_ref):
+ if not network_ref:
+ return
-
-@utils.synchronized('ensure_vlan', external=True)
-def ensure_vlan(vlan_num, bridge_interface):
- """Create a vlan unless it already exists."""
- interface = 'vlan%s' % vlan_num
- if not _device_exists(interface):
- LOG.debug(_('Starting VLAN inteface %s'), interface)
- _execute('vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD',
- run_as_root=True)
- _execute('vconfig', 'add', bridge_interface, vlan_num,
- run_as_root=True)
- _execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
- return interface
-
-
-@utils.synchronized('ensure_bridge', external=True)
-def ensure_bridge(bridge, interface, net_attrs=None):
- """Create a bridge unless it already exists.
-
- :param interface: the interface to create the bridge on.
- :param net_attrs: dictionary with attributes used to create the bridge.
-
- If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
- using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
- the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
-
- The code will attempt to move any ips that already exist on the interface
- onto the bridge and reset the default gateway if necessary.
-
- """
- if not _device_exists(bridge):
- LOG.debug(_('Starting Bridge interface for %s'), interface)
- _execute('brctl', 'addbr', bridge, run_as_root=True)
- _execute('brctl', 'setfd', bridge, 0, run_as_root=True)
- _execute('brctl', 'stp', bridge, 'off', run_as_root=True)
- _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
- if net_attrs:
- # NOTE(vish): The ip for dnsmasq has to be the first address on the
- # bridge for it to respond to reqests properly
- suffix = net_attrs['cidr'].rpartition('/')[2]
- out, err = _execute('ip', 'addr', 'add',
+ # NOTE(vish): The ip for dnsmasq has to be the first address on the
+ # bridge for it to respond to requests properly
+ suffix = network_ref['cidr'].rpartition('/')[2]
+ out, err = _execute('ip', 'addr', 'add',
'%s/%s' %
- (net_attrs['dhcp_server'], suffix),
+ (network_ref['dhcp_server'], suffix),
'brd',
- net_attrs['broadcast'],
+ network_ref['broadcast'],
'dev',
- bridge,
+ dev,
run_as_root=True,
check_exit_code=False)
- if err and err != 'RTNETLINK answers: File exists\n':
- raise exception.Error('Failed to add ip: %s' % err)
- if(FLAGS.use_ipv6):
- _execute('ip', '-f', 'inet6', 'addr',
- 'change', net_attrs['cidr_v6'],
- 'dev', bridge, run_as_root=True)
- # NOTE(vish): If the public interface is the same as the
- # bridge, then the bridge has to be in promiscuous
- # to forward packets properly.
- if(FLAGS.public_interface == bridge):
- _execute('ip', 'link', 'set',
- 'dev', bridge, 'promisc', 'on', run_as_root=True)
- if interface:
- # NOTE(vish): This will break if there is already an ip on the
- # interface, so we move any ips to the bridge
- gateway = None
- out, err = _execute('route', '-n', run_as_root=True)
- for line in out.split('\n'):
- fields = line.split()
- if fields and fields[0] == '0.0.0.0' and fields[-1] == interface:
- gateway = fields[1]
- _execute('route', 'del', 'default', 'gw', gateway,
- 'dev', interface,
- check_exit_code=False, run_as_root=True)
- out, err = _execute('ip', 'addr', 'show', 'dev', interface,
- 'scope', 'global', run_as_root=True)
- for line in out.split('\n'):
- fields = line.split()
- if fields and fields[0] == 'inet':
- params = fields[1:-1]
- _execute(*_ip_bridge_cmd('del', params, fields[-1]),
- run_as_root=True)
- _execute(*_ip_bridge_cmd('add', params, bridge),
- run_as_root=True)
- if gateway:
- _execute('route', 'add', 'default', 'gw', gateway,
- run_as_root=True)
- out, err = _execute('brctl', 'addif', bridge, interface,
- check_exit_code=False, run_as_root=True)
-
- if (err and err != "device %s is already a member of a bridge; can't "
- "enslave it to bridge %s.\n" % (interface, bridge)):
- raise exception.Error('Failed to add interface: %s' % err)
-
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
- '--in-interface %s -j ACCEPT' % \
- bridge)
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
- '--out-interface %s -j ACCEPT' % \
- bridge)
+ if err and err != 'RTNETLINK answers: File exists\n':
+ raise exception.Error('Failed to add ip: %s' % err)
+ if(FLAGS.use_ipv6):
+ _execute('ip', '-f', 'inet6', 'addr',
+ 'change', network_ref['cidr_v6'],
+ 'dev', dev, run_as_root=True)
+ # NOTE(vish): If the public interface is the same as the
+ # bridge, then the bridge has to be in promiscuous
+ # to forward packets properly.
+ if(FLAGS.public_interface == dev):
+ _execute('ip', 'link', 'set',
+ 'dev', dev, 'promisc', 'on', run_as_root=True)
def get_dhcp_leases(context, network_ref):
@@ -587,21 +515,21 @@ def get_dhcp_hosts(context, network_ref):
# configuration options (like dchp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
-def update_dhcp(context, network_ref):
+def update_dhcp(context, dev, network_ref):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
"""
- conffile = _dhcp_file(network_ref['bridge'], 'conf')
+ conffile = _dhcp_file(dev, 'conf')
with open(conffile, 'w') as f:
f.write(get_dhcp_hosts(context, network_ref))
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
- pid = _dnsmasq_pid_for(network_ref['bridge'])
+ pid = _dnsmasq_pid_for(dev)
# if dnsmasq is already running, then tell it to reload
if pid:
@@ -617,19 +545,19 @@ def update_dhcp(context, network_ref):
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile,
- 'DNSMASQ_INTERFACE=%s' % network_ref['bridge'],
+ 'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
'--bind-interfaces',
- '--interface=%s' % network_ref['bridge'],
+ '--interface=%s' % dev,
'--conf-file=%s' % FLAGS.dnsmasq_config_file,
'--domain=%s' % FLAGS.dhcp_domain,
- '--pid-file=%s' % _dhcp_file(network_ref['bridge'], 'pid'),
+ '--pid-file=%s' % _dhcp_file(dev, 'pid'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=%s,static,120s' % network_ref['dhcp_start'],
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
- '--dhcp-hostsfile=%s' % _dhcp_file(network_ref['bridge'], 'conf'),
+ '--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
if FLAGS.dns_server:
@@ -639,8 +567,8 @@ def update_dhcp(context, network_ref):
@utils.synchronized('radvd_start')
-def update_ra(context, network_ref):
- conffile = _ra_file(network_ref['bridge'], 'conf')
+def update_ra(context, dev, network_ref):
+ conffile = _ra_file(dev, 'conf')
with open(conffile, 'w') as f:
conf_str = """
interface %s
@@ -654,13 +582,13 @@ interface %s
AdvAutonomous on;
};
};
-""" % (network_ref['bridge'], network_ref['cidr_v6'])
+""" % (dev, network_ref['cidr_v6'])
f.write(conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
- pid = _ra_pid_for(network_ref['bridge'])
+ pid = _ra_pid_for(dev)
# if radvd is already running, then tell it to reload
if pid:
@@ -675,8 +603,8 @@ interface %s
LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
cmd = ['radvd',
- '-C', '%s' % _ra_file(network_ref['bridge'], 'conf'),
- '-p', '%s' % _ra_file(network_ref['bridge'], 'pid')]
+ '-C', '%s' % _ra_file(dev, 'conf'),
+ '-p', '%s' % _ra_file(dev, 'pid')]
_execute(*cmd, run_as_root=True)
@@ -722,9 +650,9 @@ def _device_exists(device):
return not err
-def _stop_dnsmasq(network):
+def _stop_dnsmasq(dev):
"""Stops the dnsmasq instance for a given network."""
- pid = _dnsmasq_pid_for(network)
+ pid = _dnsmasq_pid_for(dev)
if pid:
try:
@@ -733,49 +661,49 @@ def _stop_dnsmasq(network):
LOG.debug(_('Killing dnsmasq threw %s'), exc)
-def _dhcp_file(bridge, kind):
- """Return path to a pid, leases or conf file for a bridge."""
+def _dhcp_file(dev, kind):
+ """Return path to a pid, leases or conf file for a bridge/device."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
- bridge,
+ dev,
kind))
-def _ra_file(bridge, kind):
- """Return path to a pid or conf file for a bridge."""
+def _ra_file(dev, kind):
+ """Return path to a pid or conf file for a bridge/device."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
- bridge,
+ dev,
kind))
-def _dnsmasq_pid_for(bridge):
- """Returns the pid for prior dnsmasq instance for a bridge.
+def _dnsmasq_pid_for(dev):
+ """Returns the pid for prior dnsmasq instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
- pid_file = _dhcp_file(bridge, 'pid')
+ pid_file = _dhcp_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
-def _ra_pid_for(bridge):
- """Returns the pid for prior radvd instance for a bridge.
+def _ra_pid_for(dev):
+ """Returns the pid for prior radvd instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
- pid_file = _ra_file(bridge, 'pid')
+ pid_file = _ra_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
@@ -790,4 +718,180 @@ def _ip_bridge_cmd(action, params, device):
return cmd
+# Similar to compute virt layers, the Linux network node
+# code uses a flexible driver model to support different ways
+# of creating ethernet interfaces and attaching them to the network.
+# In the case of a network host, these interfaces
+# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
+
+
+def plug(network, mac_address):
+ return interface_driver.plug(network, mac_address)
+
+
+def unplug(network):
+ return interface_driver.unplug(network)
+
+
+class LinuxNetInterfaceDriver(object):
+ """Abstract class that defines generic network host API"""
+ """ for for all Linux interface drivers."""
+
+ def plug(self, network, mac_address):
+ """Create Linux device, return device name"""
+ raise NotImplementedError()
+
+ def unplug(self, network):
+ """Destory Linux device, return device name"""
+ raise NotImplementedError()
+
+
+# plugs interfaces using Linux Bridge
+class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
+
+ def plug(self, network, mac_address):
+ if network.get('vlan', None) is not None:
+ LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
+ network['vlan'],
+ network['bridge'],
+ network['bridge_interface'],
+ network,
+ mac_address)
+ else:
+ LinuxBridgeInterfaceDriver.ensure_bridge(
+ network['bridge'],
+ network['bridge_interface'],
+ network)
+
+ return network['bridge']
+
+ def unplug(self, network):
+ return network['bridge']
+
+ @classmethod
+ def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
+ net_attrs=None, mac_address=None):
+ """Create a vlan and bridge unless they already exist."""
+ interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
+ bridge_interface, mac_address)
+ LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
+ return interface
+
+ @classmethod
+ @utils.synchronized('ensure_vlan', external=True)
+ def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
+ """Create a vlan unless it already exists."""
+ interface = 'vlan%s' % vlan_num
+ if not _device_exists(interface):
+ LOG.debug(_('Starting VLAN interface %s'), interface)
+ _execute('vconfig', 'set_name_type',
+ 'VLAN_PLUS_VID_NO_PAD', run_as_root=True)
+ _execute('vconfig', 'add', bridge_interface,
+ vlan_num, run_as_root=True)
+ # (danwent) the bridge will inherit this address, so we want to
+ # make sure it is the value set from the NetworkManager
+ if mac_address:
+ _execute('ip', 'link', 'set', interface, "address",
+ mac_address, run_as_root=True)
+ _execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
+ return interface
+
+ @classmethod
+ @utils.synchronized('ensure_bridge', external=True)
+ def ensure_bridge(_self, bridge, interface, net_attrs=None):
+ """Create a bridge unless it already exists.
+
+ :param interface: the interface to create the bridge on.
+ :param net_attrs: dictionary with attributes used to create bridge.
+
+ If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
+ using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
+ the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
+
+ The code will attempt to move any ips that already exist on the
+ interface onto the bridge and reset the default gateway if necessary.
+
+ """
+ if not _device_exists(bridge):
+ LOG.debug(_('Starting Bridge interface for %s'), interface)
+ _execute('brctl', 'addbr', bridge, run_as_root=True)
+ _execute('brctl', 'setfd', bridge, 0, run_as_root=True)
+ # _execute('brctl setageing %s 10' % bridge, run_as_root=True)
+ _execute('brctl', 'stp', bridge, 'off', run_as_root=True)
+ # (danwent) bridge device MAC address can't be set directly.
+ # instead it inherits the MAC address of the first device on the
+ # bridge, which will either be the vlan interface, or a
+ # physical NIC.
+ _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
+
+ if interface:
+ out, err = _execute('brctl', 'addif', bridge, interface,
+ check_exit_code=False, run_as_root=True)
+
+ # NOTE(vish): This will break if there is already an ip on the
+ # interface, so we move any ips to the bridge
+ gateway = None
+ out, err = _execute('route', '-n', run_as_root=True)
+ for line in out.split('\n'):
+ fields = line.split()
+ if fields and fields[0] == '0.0.0.0' and \
+ fields[-1] == interface:
+ gateway = fields[1]
+ _execute('route', 'del', 'default', 'gw', gateway,
+ 'dev', interface, check_exit_code=False,
+ run_as_root=True)
+ out, err = _execute('ip', 'addr', 'show', 'dev', interface,
+ 'scope', 'global', run_as_root=True)
+ for line in out.split('\n'):
+ fields = line.split()
+ if fields and fields[0] == 'inet':
+ params = fields[1:-1]
+ _execute(*_ip_bridge_cmd('del', params, fields[-1]),
+ run_as_root=True)
+ _execute(*_ip_bridge_cmd('add', params, bridge),
+ run_as_root=True)
+ if gateway:
+ _execute('route', 'add', 'default', 'gw', gateway,
+ run_as_root=True)
+
+ if (err and err != "device %s is already a member of a bridge;"
+ "can't enslave it to bridge %s.\n" % (interface, bridge)):
+ raise exception.Error('Failed to add interface: %s' % err)
+
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--in-interface %s -j ACCEPT' % \
+ bridge)
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--out-interface %s -j ACCEPT' % \
+ bridge)
+
+
+# plugs interfaces using Open vSwitch
+class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
+
+ def plug(self, network, mac_address):
+ dev = "gw-" + str(network['id'])
+ if not _device_exists(dev):
+ bridge = FLAGS.linuxnet_ovs_integration_bridge
+ _execute('ovs-vsctl',
+ '--', '--may-exist', 'add-port', bridge, dev,
+ '--', 'set', 'Interface', dev, "type=internal",
+ '--', 'set', 'Interface', dev,
+ "external-ids:iface-id=nova-%s" % dev,
+ '--', 'set', 'Interface', dev,
+ "external-ids:iface-status=active",
+ '--', 'set', 'Interface', dev,
+ "external-ids:attached-mac=%s" % mac_address,
+ run_as_root=True)
+ _execute('ip', 'link', 'set', dev, "address", mac_address,
+ run_as_root=True)
+ _execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
+
+ return dev
+
+ def unplug(self, network):
+ dev = "gw-" + str(network['id'])
+ return dev
+
iptables_manager = IptablesManager()
+interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver)
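The driver is chosen once, at module import, from FLAGS.linuxnet_interface_driver, and the module-level plug()/unplug() above simply delegate to it. A sketch of what each bundled driver returns for a gateway device (flag names from this patch; the network record is illustrative):

    # flag file:
    #   --linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
    #   --linuxnet_ovs_integration_bridge=br-int
    network = {'id': 1, 'vlan': None, 'bridge': 'br100',
               'bridge_interface': 'eth0'}   # illustrative network record
    dev = 'gw-%s' % network['id']            # the OVS driver's naming scheme
    # LinuxBridgeInterfaceDriver.plug() ensures br100 exists and returns it;
    # LinuxOVSInterfaceDriver.plug() creates internal port 'gw-1' on br-int
    # and returns that. Either result feeds initialize_gateway_device().
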
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 2f30f8ec1..9b4e0cd07 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,6 +45,7 @@ topologies. All of the network commands are issued to a subclass of
"""
import datetime
+import itertools
import math
import netaddr
import socket
@@ -555,7 +556,7 @@ class NetworkManager(manager.SchedulerDependentManager):
raise exception.VirtualInterfaceMacAddressException()
def generate_mac_address(self):
- """Generate a mac address for a vif on an instance."""
+ """Generate an Ethernet MAC address."""
mac = [0x02, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
@@ -651,121 +652,112 @@ class NetworkManager(manager.SchedulerDependentManager):
network_ref = self.db.fixed_ip_get_network(context, address)
self._setup_network(context, network_ref)
- def _validate_cidrs(self, context, cidr, num_networks, network_size):
- significant_bits = 32 - int(math.log(network_size, 2))
- req_net = netaddr.IPNetwork(cidr)
- req_net_ip = str(req_net.ip)
- req_size = network_size * num_networks
- if req_size > req_net.size:
- msg = _("network_size * num_networks exceeds cidr size")
- raise ValueError(msg)
- adjusted_cidr_str = req_net_ip + '/' + str(significant_bits)
- adjusted_cidr = netaddr.IPNetwork(adjusted_cidr_str)
- try:
- used_nets = self.db.network_get_all(context)
- except exception.NoNetworksFound:
- used_nets = []
- used_cidrs = [netaddr.IPNetwork(net['cidr']) for net in used_nets]
- if adjusted_cidr in used_cidrs:
- raise ValueError(_("cidr already in use"))
- for adjusted_cidr_supernet in adjusted_cidr.supernet():
- if adjusted_cidr_supernet in used_cidrs:
- msg = _("requested cidr (%s) conflicts with existing supernet")
- raise ValueError(msg % str(adjusted_cidr))
- # watch for smaller subnets conflicting
- used_supernets = []
- for used_cidr in used_cidrs:
- if not used_cidr:
- continue
- if used_cidr.size < network_size:
- for ucsupernet in used_cidr.supernet():
- if ucsupernet.size == network_size:
- used_supernets.append(ucsupernet)
- all_req_nets = []
- if num_networks == 1:
- if adjusted_cidr in used_supernets:
- msg = _("requested cidr (%s) conflicts with existing smaller"
- " cidr")
- raise ValueError(msg % str(adjusted_cidr))
- else:
- all_req_nets.append(adjusted_cidr)
- elif num_networks >= 2:
- # split supernet into subnets
- next_cidr = adjusted_cidr
- for index in range(num_networks):
- if next_cidr.first > req_net.last:
- msg = _("Not enough subnets avail to satisfy requested "
- "num_net works - some subnets in requested range"
- " already in use")
- raise ValueError(msg)
- while True:
- used_values = used_cidrs + used_supernets
- if next_cidr in used_values:
- next_cidr = next_cidr.next()
- else:
- all_req_nets.append(next_cidr)
- next_cidr = next_cidr.next()
- break
- all_req_nets = sorted(list(set(all_req_nets)))
- return all_req_nets
-
def create_networks(self, context, label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None, **kwargs):
"""Create networks based on parameters."""
+ # NOTE(jkoelker): these are dummy values to make sure iter works
+ fixed_net_v4 = netaddr.IPNetwork('0/32')
+ fixed_net_v6 = netaddr.IPNetwork('::0/128')
+ subnets_v4 = []
+ subnets_v6 = []
+
+ subnet_bits = int(math.ceil(math.log(network_size, 2)))
+
if cidr_v6:
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
- significant_bits_v6 = 64
- network_size_v6 = 1 << 64
+ prefixlen_v6 = 128 - subnet_bits
+ subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks)
if cidr:
- req_cidrs = self._validate_cidrs(context, cidr, num_networks,
- network_size)
+ fixed_net_v4 = netaddr.IPNetwork(cidr)
+ prefixlen_v4 = 32 - subnet_bits
+ subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
+ count=num_networks))
- for index in range(num_networks):
+ # NOTE(jkoelker): This replaces the _validate_cidrs call and
+ # prevents looping multiple times
+ try:
+ nets = self.db.network_get_all(context)
+ except exception.NoNetworksFound:
+ nets = []
+ used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets]
+
+ def find_next(subnet):
+ next_subnet = subnet.next()
+ while next_subnet in subnets_v4:
+ next_subnet = next_subnet.next()
+ if next_subnet in fixed_net_v4:
+ return next_subnet
+
+ for subnet in list(subnets_v4):
+ if subnet in used_subnets:
+ next_subnet = find_next(subnet)
+ if next_subnet:
+ subnets_v4.remove(subnet)
+ subnets_v4.append(next_subnet)
+ subnet = next_subnet
+ else:
+ raise ValueError(_('cidr already in use'))
+ for used_subnet in used_subnets:
+ if subnet in used_subnet:
+ msg = _('requested cidr (%(cidr)s) conflicts with '
+ 'existing supernet (%(super)s)')
+ raise ValueError(msg % {'cidr': subnet,
+ 'super': used_subnet})
+ if used_subnet in subnet:
+ next_subnet = find_next(subnet)
+ if next_subnet:
+ subnets_v4.remove(subnet)
+ subnets_v4.append(next_subnet)
+ subnet = next_subnet
+ else:
+ msg = _('requested cidr (%(cidr)s) conflicts '
+ 'with existing smaller cidr '
+ '(%(smaller)s)')
+ raise ValueError(msg % {'cidr': subnet,
+ 'smaller': used_subnet})
+
+ networks = []
+ subnets = itertools.izip_longest(subnets_v4, subnets_v6)
+ for index, (subnet_v4, subnet_v6) in enumerate(subnets):
net = {}
net['bridge'] = bridge
net['bridge_interface'] = bridge_interface
+ net['multi_host'] = multi_host
+
net['dns1'] = dns1
net['dns2'] = dns2
- if cidr:
- project_net = req_cidrs[index]
- net['cidr'] = str(project_net)
- net['multi_host'] = multi_host
- net['netmask'] = str(project_net.netmask)
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast)
- net['dhcp_start'] = str(project_net[2])
-
if num_networks > 1:
net['label'] = '%s_%d' % (label, index)
else:
net['label'] = label
- if cidr_v6:
- start_v6 = index * network_size_v6
- cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
- significant_bits_v6)
- net['cidr_v6'] = cidr_v6
-
- project_net_v6 = netaddr.IPNetwork(cidr_v6)
+ if cidr and subnet_v4:
+ net['cidr'] = str(subnet_v4)
+ net['netmask'] = str(subnet_v4.netmask)
+ net['gateway'] = str(subnet_v4[1])
+ net['broadcast'] = str(subnet_v4.broadcast)
+ net['dhcp_start'] = str(subnet_v4[2])
+ if cidr_v6 and subnet_v6:
+ net['cidr_v6'] = str(subnet_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
net['gateway_v6'] = str(gateway_v6)
else:
- net['gateway_v6'] = str(project_net_v6[1])
+ net['gateway_v6'] = str(subnet_v6[1])
- net['netmask_v6'] = str(project_net_v6._prefixlen)
+ net['netmask_v6'] = str(subnet_v6._prefixlen)
if kwargs.get('vpn', False):
# this bit here is for vlan-manager
del net['dns1']
del net['dns2']
vlan = kwargs['vlan_start'] + index
- net['vpn_private_address'] = str(project_net[2])
- net['dhcp_start'] = str(project_net[3])
+ net['vpn_private_address'] = str(subnet_v4[2])
+ net['dhcp_start'] = str(subnet_v4[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
@@ -778,9 +770,12 @@ class NetworkManager(manager.SchedulerDependentManager):
if not network:
raise ValueError(_('Network already exists!'))
+ else:
+ networks.append(network)
- if network and cidr:
+ if network and cidr and subnet_v4:
self._create_fixed_ips(context, network['id'])
+ return networks
@property
def _bottom_reserved_ips(self): # pylint: disable=R0201
@@ -930,14 +925,16 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager):
def _setup_network(self, context, network_ref):
"""Sets up network on this host."""
network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
- self.driver.ensure_bridge(network_ref['bridge'],
- network_ref['bridge_interface'],
- network_ref)
+
+ mac_address = self.generate_mac_address()
+ dev = self.driver.plug(network_ref, mac_address)
+ self.driver.initialize_gateway_device(dev, network_ref)
+
if not FLAGS.fake_network:
- self.driver.update_dhcp(context, network_ref)
+ self.driver.update_dhcp(context, dev, network_ref)
if(FLAGS.use_ipv6):
- self.driver.update_ra(context, network_ref)
- gateway = utils.get_my_linklocal(network_ref['bridge'])
+ self.driver.update_ra(context, dev, network_ref)
+ gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network_ref['id'],
{'gateway_v6': gateway})
@@ -1039,23 +1036,23 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
else:
address = network_ref['vpn_public_address']
network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
- self.driver.ensure_vlan_bridge(network_ref['vlan'],
- network_ref['bridge'],
- network_ref['bridge_interface'],
- network_ref)
+
+ mac_address = self.generate_mac_address()
+ dev = self.driver.plug(network_ref, mac_address)
+ self.driver.initialize_gateway_device(dev, network_ref)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == FLAGS.vpn_ip and hasattr(self.driver,
- "ensure_vlan_forward"):
- self.driver.ensure_vlan_forward(FLAGS.vpn_ip,
+ "ensure_vpn_forward"):
+ self.driver.ensure_vpn_forward(FLAGS.vpn_ip,
network_ref['vpn_public_port'],
network_ref['vpn_private_address'])
if not FLAGS.fake_network:
- self.driver.update_dhcp(context, network_ref)
+ self.driver.update_dhcp(context, dev, network_ref)
if(FLAGS.use_ipv6):
- self.driver.update_ra(context, network_ref)
- gateway = utils.get_my_linklocal(network_ref['bridge'])
+ self.driver.update_ra(context, dev, network_ref)
+ gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network_ref['id'],
{'gateway_v6': gateway})
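The arithmetic behind the rewritten create_networks(): subnet_bits comes from network_size, the prefix length follows, and netaddr's subnet() iterator replaces the hand-rolled _validate_cidrs() loop. A small runnable example with illustrative inputs:

    import itertools
    import math
    import netaddr

    network_size = 256
    num_networks = 2
    subnet_bits = int(math.ceil(math.log(network_size, 2)))   # 8
    prefixlen_v4 = 32 - subnet_bits                           # /24
    subnets_v4 = list(netaddr.IPNetwork('10.0.0.0/16').subnet(
            prefixlen_v4, count=num_networks))
    # [IPNetwork('10.0.0.0/24'), IPNetwork('10.0.1.0/24')]
    # izip_longest pads the shorter of the v4/v6 lists with None, which is
    # why the loop guards with 'if cidr and subnet_v4' above
    for subnet_v4, subnet_v6 in itertools.izip_longest(subnets_v4, []):
        gateway, dhcp_start = subnet_v4[1], subnet_v4[2]
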
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/abstract_scheduler.py
index d1924c9f9..eb924732a 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/abstract_scheduler.py
@@ -14,7 +14,7 @@
# under the License.
"""
-The Zone Aware Scheduler is a base class Scheduler for creating instances
+The AbstractScheduler is a base class Scheduler for creating instances
across zones. There are two expansion points to this class for:
1. Assigning Weights to hosts for requested instances
2. Filtering Hosts based on required instance capabilities
@@ -40,7 +40,7 @@ from nova.scheduler import api
from nova.scheduler import driver
FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')
+LOG = logging.getLogger('nova.scheduler.abstract_scheduler')
class InvalidBlob(exception.NovaException):
@@ -48,8 +48,10 @@ class InvalidBlob(exception.NovaException):
"to instance create request.")
-class ZoneAwareScheduler(driver.Scheduler):
- """Base class for creating Zone Aware Schedulers."""
+class AbstractScheduler(driver.Scheduler):
+ """Base class for creating Schedulers that can work across any nova
+ deployment, from simple designs to multiply-nested zones.
+ """
def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
@@ -266,7 +268,7 @@ class ZoneAwareScheduler(driver.Scheduler):
"""
if topic != "compute":
- raise NotImplementedError(_("Zone Aware Scheduler only understands"
+ raise NotImplementedError(_("Scheduler only understands"
" Compute nodes (for now)"))
num_instances = request_spec.get('num_instances', 1)
@@ -328,13 +330,31 @@ class ZoneAwareScheduler(driver.Scheduler):
requested_mem = instance_type['memory_mb'] * 1024 * 1024
return capabilities['host_memory_free'] >= requested_mem
+ def hold_filter_hosts(self, topic, request_spec, hosts=None):
+ """Filter the full host list (from the ZoneManager)"""
+ # NOTE(dabo): The logic used by the current _schedule() method
+ # is incorrect. Since this task is just to refactor the classes,
+ # I'm not fixing the logic now - that will be the next task.
+ # So for now this method is just renamed; afterwards this will
+ # become the filter_hosts() method, and the one below will
+ # be removed.
+ filter_name = request_spec.get('filter', None)
+ # Make sure that the requested filter is legitimate.
+ selected_filter = host_filter.choose_host_filter(filter_name)
+
+ # TODO(sandy): We're only using InstanceType-based specs
+ # currently. Later we'll need to snoop for more detailed
+ # host filter requests.
+ instance_type = request_spec['instance_type']
+ name, query = selected_filter.instance_type_to_filter(instance_type)
+ return selected_filter.filter_hosts(self.zone_manager, query)
+
def filter_hosts(self, topic, request_spec, host_list=None):
"""Return a list of hosts which are acceptable for scheduling.
Return value should be a list of (hostname, capability_dict)s.
Derived classes may override this, but may find the
'<topic>_filter' function more appropriate.
"""
-
def _default_filter(self, hostname, capabilities, request_spec):
"""Default filter function if there's no <topic>_filter"""
# NOTE(sirp): The default logic is the equivalent to
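The two expansion points named in the module docstring correspond to the filter_hosts() and weigh_hosts() methods. A minimal subclass sketch; the equal-weight rule mirrors the weigh_hosts() removed from host_filter.py below:

    from nova.scheduler import abstract_scheduler

    class TrivialScheduler(abstract_scheduler.AbstractScheduler):
        """Accepts every host and weighs them all equally."""

        def filter_hosts(self, topic, request_spec, host_list=None):
            # expected return: list of (hostname, capability_dict) tuples
            return host_list

        def weigh_hosts(self, topic, request_spec, hosts):
            return [dict(weight=1, hostname=hostname, capabilities=caps)
                    for hostname, caps in hosts]
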
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index b7bbbbcb8..45a8f40d8 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -14,7 +14,12 @@
# under the License.
"""
-Host Filter is a mechanism for requesting instance resources.
+The Host Filter classes are a way to ensure that only hosts that are
+appropriate are considered when creating a new instance. Hosts that are
+either incompatible or insufficient to accept a newly-requested instance
+are removed by Host Filter classes from consideration. Those that pass
+the filter are then passed on for weighting or other process for ordering.
+
Three filters are included: AllHosts, Flavor & JSON. AllHosts just
returns the full, unfiltered list of hosts. Flavor is a hard coded
matching mechanism based on flavor criteria and JSON is an ad-hoc
@@ -28,12 +33,6 @@ noted a need for a more expressive way of specifying instances.
Since we don't want to get into building full DSL this is a simple
form as an example of how this could be done. In reality, most
consumers will use the more rigid filters such as FlavorFilter.
-
-Note: These are "required" capability filters. These capabilities
-used must be present or the host will be excluded. The hosts
-returned are then weighed by the Weighted Scheduler. Weights
-can take the more esoteric factors into consideration (such as
-server affinity and customer separation).
"""
import json
@@ -41,9 +40,7 @@ import json
from nova import exception
from nova import flags
from nova import log as logging
-from nova.scheduler import zone_aware_scheduler
from nova import utils
-from nova.scheduler import zone_aware_scheduler
LOG = logging.getLogger('nova.scheduler.host_filter')
@@ -125,9 +122,8 @@ class InstanceTypeFilter(HostFilter):
spec_disk = instance_type['local_gb']
extra_specs = instance_type['extra_specs']
- if host_ram_mb >= spec_ram and \
- disk_bytes >= spec_disk and \
- self._satisfies_extra_specs(capabilities, instance_type):
+ if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
+ self._satisfies_extra_specs(capabilities, instance_type)):
selected_hosts.append((host, capabilities))
return selected_hosts
@@ -309,7 +305,6 @@ def choose_host_filter(filter_name=None):
function checks the filter name against a predefined set
of acceptable filters.
"""
-
if not filter_name:
filter_name = FLAGS.default_host_filter
for filter_class in FILTERS:
@@ -317,33 +312,3 @@ def choose_host_filter(filter_name=None):
if host_match == filter_name:
return filter_class()
raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
-
-
-class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- """The HostFilterScheduler uses the HostFilter to filter
- hosts for weighing. The particular filter used may be passed in
- as an argument or the default will be used.
-
- request_spec = {'filter': <Filter name>,
- 'instance_type': <InstanceType dict>}
- """
-
- def filter_hosts(self, topic, request_spec, hosts=None):
- """Filter the full host list (from the ZoneManager)"""
-
- filter_name = request_spec.get('filter', None)
- host_filter = choose_host_filter(filter_name)
-
- # TODO(sandy): We're only using InstanceType-based specs
- # currently. Later we'll need to snoop for more detailed
- # host filter requests.
- instance_type = request_spec['instance_type']
- name, query = host_filter.instance_type_to_filter(instance_type)
- return host_filter.filter_hosts(self.zone_manager, query)
-
- def weigh_hosts(self, topic, request_spec, hosts):
- """Derived classes must override this method and return
- a lists of hosts in [{weight, hostname}] format.
- """
- return [dict(weight=1, hostname=hostname, capabilities=caps)
- for hostname, caps in hosts]
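With HostFilterScheduler gone, the filter is now resolved inside the scheduler (see hold_filter_hosts() in abstract_scheduler.py above), but the request_spec contract is unchanged. A sketch of direct use; the filter name and instance_type values are illustrative:

    from nova.scheduler import host_filter

    request_spec = {'filter': 'nova.scheduler.host_filter.InstanceTypeFilter',
                    'instance_type': {'memory_mb': 2048, 'local_gb': 20,
                                      'extra_specs': {}}}
    selected_filter = host_filter.choose_host_filter(request_spec.get('filter'))
    name, query = selected_filter.instance_type_to_filter(
            request_spec['instance_type'])
    # hosts = selected_filter.filter_hosts(zone_manager, query)
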
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 329107efe..a58b11289 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -22,11 +22,14 @@ The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
"""
+# TODO(dabo): This class will be removed in the next merge prop; it remains now
+# because much of the code will be refactored into different classes.
+
import collections
from nova import flags
from nova import log as logging
-from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import abstract_scheduler
from nova import utils
from nova import exception
@@ -61,7 +64,7 @@ def compute_fill_first_cost_fn(host):
return free_mem
-class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+class LeastCostScheduler(abstract_scheduler.AbstractScheduler):
def __init__(self, *args, **kwargs):
self.cost_fns_cache = {}
super(LeastCostScheduler, self).__init__(*args, **kwargs)
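Because compute_fill_first_cost_fn() returns the host's free memory and the scheduler picks the least total cost, a positive weight yields fill-first packing. A toy weighted-sum sketch (weights and capability data illustrative):

    cost_fns = [(1.0, lambda caps: caps['host_memory_free'])]  # (weight, fn)
    hosts = {'host1': {'host_memory_free': 4096},
             'host2': {'host_memory_free': 512}}
    costs = dict((name, sum(w * fn(caps) for w, fn in cost_fns))
                 for name, caps in hosts.iteritems())
    winner = min(costs, key=costs.get)  # 'host2': least free RAM fills first
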
diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py
new file mode 100644
index 000000000..f6a25385f
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_quotas.py
@@ -0,0 +1,152 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import webob
+
+from nova import context
+from nova import test
+from nova.tests.api.openstack import fakes
+
+from nova.api.openstack.contrib.quotas import QuotaSetsController
+
+
+def quota_set(id):
+ return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10,
+ 'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10,
+ 'instances': 10, 'injected_files': 5, 'cores': 20,
+ 'injected_file_content_bytes': 10240}}
+
+
+def quota_set_list():
+ return {'quota_set_list': [quota_set('1234'), quota_set('5678'),
+ quota_set('update_me')]}
+
+
+class QuotaSetsTest(test.TestCase):
+
+ def setUp(self):
+ super(QuotaSetsTest, self).setUp()
+ self.controller = QuotaSetsController()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.user_context = context.RequestContext(self.user_id,
+ self.project_id)
+ self.admin_context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+
+ def test_format_quota_set(self):
+ raw_quota_set = {
+ 'instances': 10,
+ 'cores': 20,
+ 'ram': 51200,
+ 'volumes': 10,
+ 'floating_ips': 10,
+ 'metadata_items': 128,
+ 'gigabytes': 1000,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}
+
+ quota_set = QuotaSetsController()._format_quota_set('1234',
+ raw_quota_set)
+ qs = quota_set['quota_set']
+
+ self.assertEqual(qs['id'], '1234')
+ self.assertEqual(qs['instances'], 10)
+ self.assertEqual(qs['cores'], 20)
+ self.assertEqual(qs['ram'], 51200)
+ self.assertEqual(qs['volumes'], 10)
+ self.assertEqual(qs['gigabytes'], 1000)
+ self.assertEqual(qs['floating_ips'], 10)
+ self.assertEqual(qs['metadata_items'], 128)
+ self.assertEqual(qs['injected_files'], 5)
+ self.assertEqual(qs['injected_file_content_bytes'], 10240)
+
+ def test_quotas_defaults(self):
+ req = webob.Request.blank('/v1.1/os-quota-sets/fake_tenant/defaults')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+ expected = {'quota_set': {
+ 'id': 'fake_tenant',
+ 'instances': 10,
+ 'cores': 20,
+ 'ram': 51200,
+ 'volumes': 10,
+ 'gigabytes': 1000,
+ 'floating_ips': 10,
+ 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}}
+
+ self.assertEqual(json.loads(res.body), expected)
+
+ def test_quotas_show_as_admin(self):
+ req = webob.Request.blank('/v1.1/os-quota-sets/1234')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(json.loads(res.body), quota_set('1234'))
+
+ def test_quotas_show_as_unauthorized_user(self):
+ req = webob.Request.blank('/v1.1/os-quota-sets/1234')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_context))
+
+ self.assertEqual(res.status_int, 403)
+
+ def test_quotas_update_as_admin(self):
+ updated_quota_set = {'quota_set': {'instances': 50,
+ 'cores': 50, 'ram': 51200, 'volumes': 10,
+ 'gigabytes': 1000, 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}}
+
+ req = webob.Request.blank('/v1.1/os-quota-sets/update_me')
+ req.method = 'PUT'
+ req.body = json.dumps(updated_quota_set)
+ req.headers['Content-Type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+
+ self.assertEqual(json.loads(res.body), updated_quota_set)
+
+ def test_quotas_update_as_user(self):
+ updated_quota_set = {'quota_set': {'instances': 50,
+ 'cores': 50, 'ram': 51200, 'volumes': 10,
+ 'gigabytes': 1000, 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}}
+
+ req = webob.Request.blank('/v1.1/os-quota-sets/update_me')
+ req.method = 'PUT'
+ req.body = json.dumps(updated_quota_set)
+ req.headers['Content-Type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_context))
+
+ self.assertEqual(res.status_int, 403)
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 8a17ee5b2..9c29363c6 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -84,6 +84,19 @@ class ExtensionControllerTest(test.TestCase):
super(ExtensionControllerTest, self).setUp()
ext_path = os.path.join(os.path.dirname(__file__), "extensions")
self.flags(osapi_extensions_path=ext_path)
+ self.ext_list = [
+ "Createserverext"
+ "FlavorExtraSpecs",
+ "Floating_ips",
+ "Fox In Socks",
+ "Hosts",
+ "Keypairs",
+ "Multinic",
+ "Quotas",
+ "SecurityGroups",
+ "Volumes",
+ ]
+ self.ext_list.sort()
def test_list_extensions_json(self):
app = openstack.APIRouterV11()
@@ -96,12 +109,10 @@ class ExtensionControllerTest(test.TestCase):
data = json.loads(response.body)
names = [x['name'] for x in data['extensions']]
names.sort()
- self.assertEqual(names, ["Createserverext", "FlavorExtraSpecs",
- "Floating_ips", "Fox In Socks", "Hosts", "Keypairs", "Multinic",
- "SecurityGroups", "Volumes"])
+ self.assertEqual(names, self.ext_list)
# Make sure that at least Fox in Sox is correct.
- (fox_ext,) = [
+ (fox_ext, ) = [
x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
self.assertEqual(fox_ext, {
'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
@@ -143,10 +154,10 @@ class ExtensionControllerTest(test.TestCase):
# Make sure we have all the extensions.
exts = root.findall('{0}extension'.format(NS))
- self.assertEqual(len(exts), 9)
+ self.assertEqual(len(exts), len(self.ext_list))
# Make sure that at least Fox in Sox is correct.
- (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX']
+ (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
self.assertEqual(fox_ext.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
@@ -218,6 +229,7 @@ class ResourceExtensionTest(test.TestCase):
class InvalidExtension(object):
+
def get_alias(self):
return "THIRD"
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py
index 788efca52..f4f5cc233 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_abstract_scheduler.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Tests For Zone Aware Scheduler.
+Tests For Abstract Scheduler.
"""
import json
@@ -25,7 +25,7 @@ from nova import rpc
from nova import test
from nova.compute import api as compute_api
from nova.scheduler import driver
-from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import abstract_scheduler
from nova.scheduler import zone_manager
@@ -60,7 +60,7 @@ def fake_zone_manager_service_states(num_hosts):
return states
-class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler):
# No need to stub anything at the moment
pass
@@ -161,15 +161,15 @@ def fake_zone_get_all(context):
]
-class ZoneAwareSchedulerTestCase(test.TestCase):
- """Test case for Zone Aware Scheduler."""
+class AbstractSchedulerTestCase(test.TestCase):
+ """Test case for Abstract Scheduler."""
- def test_zone_aware_scheduler(self):
+ def test_abstract_scheduler(self):
"""
Create a nested set of FakeZones, try to build multiple instances
and ensure that a select call returns the appropriate build plan.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
@@ -194,7 +194,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
properly adjusted based on the scale/offset in the zone
db entries.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
child_results = fake_call_zone_method(None, None, None, None)
zones = fake_zone_get_all(None)
sched._adjust_child_weights(child_results, zones)
@@ -209,11 +209,11 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
if zone == 'zone3': # Scale x1000
self.assertEqual(scaled.pop(0), w)
- def test_empty_zone_aware_scheduler(self):
+ def test_empty_abstract_scheduler(self):
"""
Ensure empty hosts & child_zones result in NoValidHosts exception.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
@@ -231,7 +231,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
If the zone_blob hint was passed in, don't re-schedule.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
request_spec = {
@@ -248,7 +248,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_local(self):
"""Provision a resource locally or remotely."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource_locally',
fake_provision_resource_locally)
@@ -260,7 +260,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_remote(self):
"""Provision a resource locally or remotely."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource_from_blob',
fake_provision_resource_from_blob)
@@ -272,9 +272,9 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_from_blob_empty(self):
"""Provision a resource locally or remotely given no hints."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
request_spec = {}
- self.assertRaises(zone_aware_scheduler.InvalidBlob,
+ self.assertRaises(abstract_scheduler.InvalidBlob,
sched._provision_resource_from_blob,
None, {}, 1, {}, {})
@@ -283,7 +283,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
Provision a resource locally or remotely when blob hint passed in.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
def fake_create_db_entry_for_new_instance(self, context,
@@ -317,7 +317,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
passed in.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_decrypt_blob',
fake_decrypt_blob_returns_child_info)
was_called = False
@@ -336,7 +336,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
from an immediate child.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
fake_ask_child_zone_to_create_instance)
@@ -350,14 +350,14 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_decrypt_blob(self):
"""Test that the decrypt method works."""
- fixture = FakeZoneAwareScheduler()
+ fixture = FakeAbstractScheduler()
test_data = {"foo": "bar"}
class StubDecryptor(object):
def decryptor(self, key):
return lambda blob: blob
- self.stubs.Set(zone_aware_scheduler, 'crypto',
+ self.stubs.Set(abstract_scheduler, 'crypto',
StubDecryptor())
self.assertEqual(fixture._decrypt_blob(test_data),
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index fbe6b2f77..de7581d0a 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -18,7 +18,7 @@ Tests For Least Cost Scheduler
from nova import test
from nova.scheduler import least_cost
-from nova.tests.scheduler import test_zone_aware_scheduler
+from nova.tests.scheduler import test_abstract_scheduler
MB = 1024 * 1024
@@ -70,7 +70,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
zone_manager = FakeZoneManager()
- states = test_zone_aware_scheduler.fake_zone_manager_service_states(
+ states = test_abstract_scheduler.fake_zone_manager_service_states(
num_hosts=10)
zone_manager.service_states = states
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 33461025f..158df2a27 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -257,7 +257,9 @@ class SimpleDriverTestCase(test.TestCase):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 1
+ # NOTE(jk0): If an integer is passed as the image_ref, the default
+ # image service (in this case, the fake) will be used.
+ inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index e2fa3b140..4f5d36f14 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -1341,6 +1341,69 @@ class ComputeTestCase(test.TestCase):
db.instance_destroy(c, instance_id2)
db.instance_destroy(c, instance_id3)
+ def test_get_all_by_metadata(self):
+ """Test searching instances by metadata"""
+
+ c = context.get_admin_context()
+ instance_id0 = self._create_instance()
+ instance_id1 = self._create_instance({
+ 'metadata': {'key1': 'value1'}})
+ instance_id2 = self._create_instance({
+ 'metadata': {'key2': 'value2'}})
+ instance_id3 = self._create_instance({
+ 'metadata': {'key3': 'value3'}})
+ instance_id4 = self._create_instance({
+ 'metadata': {'key3': 'value3',
+ 'key4': 'value4'}})
+
+ # get all instances
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {}})
+ self.assertEqual(len(instances), 5)
+
+ # wrong key/value combination
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key1': 'value3'}})
+ self.assertEqual(len(instances), 0)
+
+ # non-existing keys
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key5': 'value1'}})
+ self.assertEqual(len(instances), 0)
+
+ # find existing instance
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key2': 'value2'}})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].id, instance_id2)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key3': 'value3'}})
+ self.assertEqual(len(instances), 2)
+ instance_ids = [instance.id for instance in instances]
+ self.assertTrue(instance_id3 in instance_ids)
+ self.assertTrue(instance_id4 in instance_ids)
+
+ # multiple criteria as a dict
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key3': 'value3',
+ 'key4': 'value4'}})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].id, instance_id4)
+
+ # multiple criteria as a list
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': [{'key4': 'value4'},
+ {'key3': 'value3'}]})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].id, instance_id4)
+
+ db.instance_destroy(c, instance_id0)
+ db.instance_destroy(c, instance_id1)
+ db.instance_destroy(c, instance_id2)
+ db.instance_destroy(c, instance_id3)
+ db.instance_destroy(c, instance_id4)
+
@staticmethod
def _parse_db_block_device_mapping(bdm_ref):
attr_list = ('delete_on_termination', 'device_name', 'no_device',
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index ad678714e..bfc7a6d44 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -39,7 +39,7 @@ class MetadataTestCase(test.TestCase):
'key_name': None,
'host': 'test',
'launch_index': 1,
- 'instance_type': 'm1.tiny',
+ 'instance_type': {'name': 'm1.tiny'},
'reservation_id': 'r-xxxxxxxx',
'user_data': '',
'image_ref': 7,
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 1f01695a8..581a4765c 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -376,7 +376,7 @@ class CommonNetworkTestCase(test.TestCase):
raise exception.NetworkNotFoundForCidr()
def network_create_safe(self, context, net):
- fakenet = {}
+ fakenet = dict(net)
fakenet['id'] = 999
return fakenet
@@ -390,6 +390,9 @@ class CommonNetworkTestCase(test.TestCase):
def deallocate_fixed_ip(self, context, address):
self.deallocate_called = address
+ def _create_fixed_ips(self, context, network_id):
+ pass
+
def fake_create_fixed_ips(self, context, network_id):
return None
@@ -407,16 +410,20 @@ class CommonNetworkTestCase(test.TestCase):
def test_validate_cidrs(self):
manager = self.FakeNetworkManager()
- nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256)
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
+ False, 1, 256, None, None, None,
+ None)
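+ # (positional args assumed to be: context, label, cidr, multi_host,
+ # num_networks, network_size, cidr_v6, then bridge/gateway options)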
self.assertEqual(1, len(nets))
- cidrs = [str(net) for net in nets]
+ cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = self.FakeNetworkManager()
- nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128)
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
+ False, 2, 128, None, None, None,
+ None)
self.assertEqual(2, len(nets))
- cidrs = [str(net) for net in nets]
+ cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/25' in cidrs)
self.assertTrue('192.168.0.128/25' in cidrs)
@@ -427,9 +434,11 @@ class CommonNetworkTestCase(test.TestCase):
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/24'}])
self.mox.ReplayAll()
- nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256)
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+ False, 4, 256, None, None, None,
+ None)
self.assertEqual(4, len(nets))
- cidrs = [str(net) for net in nets]
+ cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
@@ -445,8 +454,9 @@ class CommonNetworkTestCase(test.TestCase):
self.mox.ReplayAll()
# ValueError: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
- args = [None, '192.168.2.0/24', 1, 256]
- self.assertRaises(ValueError, manager._validate_cidrs, *args)
+ args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
+ None, None)
+ self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_smaller_cidr_in_use(self):
manager = self.FakeNetworkManager()
@@ -455,9 +465,10 @@ class CommonNetworkTestCase(test.TestCase):
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/25'}])
self.mox.ReplayAll()
- nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256)
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+ False, 4, 256, None, None, None, None)
self.assertEqual(4, len(nets))
- cidrs = [str(net) for net in nets]
+ cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
@@ -471,9 +482,10 @@ class CommonNetworkTestCase(test.TestCase):
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/29'}])
self.mox.ReplayAll()
- nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32)
+ nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
+ False, 3, 32, None, None, None, None)
self.assertEqual(3, len(nets))
- cidrs = [str(net) for net in nets]
+ cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
@@ -488,17 +500,19 @@ class CommonNetworkTestCase(test.TestCase):
{'id': 3, 'cidr': '192.168.2.128/26'}]
manager.db.network_get_all(ctxt).AndReturn(in_use)
self.mox.ReplayAll()
- args = [None, '192.168.2.0/24', 3, 64]
+ args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
+ None, None)
# ValueError: Not enough subnets avail to satisfy requested num_
# networks - some subnets in requested range already
# in use
- self.assertRaises(ValueError, manager._validate_cidrs, *args)
+ self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = self.FakeNetworkManager()
- args = [None, '192.168.0.0/24', 2, 256]
+ args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
+ None, None)
# ValueError: network_size * num_networks exceeds cidr size
- self.assertRaises(ValueError, manager._validate_cidrs, *args)
+ self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_already_used(self):
manager = self.FakeNetworkManager()
@@ -508,20 +522,23 @@ class CommonNetworkTestCase(test.TestCase):
'cidr': '192.168.0.0/24'}])
self.mox.ReplayAll()
# ValueError: cidr already in use
- args = [None, '192.168.0.0/24', 1, 256]
- self.assertRaises(ValueError, manager._validate_cidrs, *args)
+ args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
+ None, None)
+ self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = self.FakeNetworkManager()
- args = [None, '192.168.0.0/24', 200, 256]
+ args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
+ None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
- self.assertRaises(ValueError, manager._validate_cidrs, *args)
+ self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = self.FakeNetworkManager()
- nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256)
- returned_cidrs = [str(net) for net in nets]
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+ False, 2, 256, None, None, None, None)
+ returned_cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in returned_cidrs)
self.assertTrue('192.168.1.0/24' in returned_cidrs)
@@ -532,10 +549,11 @@ class CommonNetworkTestCase(test.TestCase):
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
- args = [None, '192.168.0.0/24', 1, 256]
+ args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
+ None, None)
# ValueError: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
- self.assertRaises(ValueError, manager._validate_cidrs, *args)
+ self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
@@ -545,7 +563,7 @@ class CommonNetworkTestCase(test.TestCase):
args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
None]
result = manager.create_networks(*args)
- self.assertEqual(manager.create_networks(*args), None)
+ self.assertTrue(manager.create_networks(*args))
def test_create_networks_cidr_already_used(self):
manager = self.FakeNetworkManager()
@@ -565,4 +583,4 @@ class CommonNetworkTestCase(test.TestCase):
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
None]
- self.assertEqual(manager.create_networks(*args), None)
+ self.assertTrue(manager.create_networks(*args))
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 0d0f84e32..a6a1febd6 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -28,10 +28,10 @@ from nova import utils
def stubout_instance_snapshot(stubs):
@classmethod
- def fake_fetch_image(cls, context, session, instance_id, image, user,
+ def fake_fetch_image(cls, context, session, instance, image, user,
project, type):
from nova.virt.xenapi.fake import create_vdi
- name_label = "instance-%s" % instance_id
+ name_label = "instance-%s" % instance.id
#TODO: create fake SR record
sr_ref = "fakesr"
vdi_ref = create_vdi(name_label=name_label, read_only=False,
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 2b17e244a..e8a657bac 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1538,8 +1538,9 @@ class LibvirtConnection(driver.ComputeDriver):
# If any instances never launch at destination host,
# basic-filtering must be set here.
self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
- # setting up n)ova-instance-instance-xx mainly.
- self.firewall_driver.prepare_instance_filter(instance_ref, network_info)
+ # setting up nova-instance-instance-xx mainly.
+ self.firewall_driver.prepare_instance_filter(instance_ref,
+ network_info)
# wait for completion
timeout_count = range(FLAGS.live_migration_retry_count)
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 4cb9abda4..5a91a4e28 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -80,12 +80,14 @@ class LibvirtBridgeDriver(VIFDriver):
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
{'vlan': network['vlan'],
'bridge': network['bridge']})
- linux_net.ensure_vlan_bridge(network['vlan'],
+ linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
+ network['vlan'],
network['bridge'],
network['bridge_interface'])
else:
LOG.debug(_("Ensuring bridge %s"), network['bridge'])
- linux_net.ensure_bridge(network['bridge'],
+ linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
+ network['bridge'],
network['bridge_interface'])
return self._get_configurations(network, mapping)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 1aa642e4e..7c91aa9b9 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -140,6 +140,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
'location': '',
'xenstore_data': '',
'sm_config': {},
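+ # read by VMHelper._get_vdi_chain_size when bounding image size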
+ 'physical_utilisation': '123',
'VBDs': {}})
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ba5cf4b49..4a1f07bb1 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -31,6 +31,7 @@ import uuid
from xml.dom import minidom
import glance.client
+from nova import db
from nova import exception
from nova import flags
import nova.image
@@ -413,7 +414,7 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
- def fetch_image(cls, context, session, instance_id, image, user_id,
+ def fetch_image(cls, context, session, instance, image, user_id,
project_id, image_type):
"""Fetch image from glance based on image type.
@@ -422,18 +423,19 @@ class VMHelper(HelperBase):
"""
if image_type == ImageType.DISK_VHD:
return cls._fetch_image_glance_vhd(context,
- session, instance_id, image, image_type)
+ session, instance, image, image_type)
else:
return cls._fetch_image_glance_disk(context,
- session, instance_id, image, image_type)
+ session, instance, image, image_type)
@classmethod
- def _fetch_image_glance_vhd(cls, context, session, instance_id, image,
+ def _fetch_image_glance_vhd(cls, context, session, instance, image,
image_type):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
"""
+ instance_id = instance.id
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
sr_ref = safe_find_sr(session)
@@ -467,17 +469,58 @@ class VMHelper(HelperBase):
cls.scan_sr(session, instance_id, sr_ref)
- # Pull out the UUID of the first VDI
- vdi_uuid = vdis[0]['vdi_uuid']
+ # Pull out the UUID of the first VDI (which is the os VDI)
+ os_vdi_uuid = vdis[0]['vdi_uuid']
+
# Set the name-label to ease debugging
- vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(os_vdi_uuid)
primary_name_label = get_name_label_for_image(image)
session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label)
+ cls._check_vdi_size(context, session, instance, os_vdi_uuid)
return vdis
@classmethod
- def _fetch_image_glance_disk(cls, context, session, instance_id, image,
+ def _get_vdi_chain_size(cls, context, session, vdi_uuid):
+ """Compute the total size of a VDI chain, starting with the specified
+ VDI UUID.
+
+ This walks the VDI chain to the root, adding the size of each VDI
+ to the total.
+ """
+ size_bytes = 0
+ for vdi_rec in walk_vdi_chain(session, vdi_uuid):
+ cur_vdi_uuid = vdi_rec['uuid']
+ vdi_size_bytes = int(vdi_rec['physical_utilisation'])
+ LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
+ '%(vdi_size_bytes)d') % locals())
+ size_bytes += vdi_size_bytes
+ return size_bytes
+
+ @classmethod
+ def _check_vdi_size(cls, context, session, instance, vdi_uuid):
+ size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid)
+
+ # FIXME(jk0): this was copied directly from compute.manager.py, let's
+ # refactor this to a common area
+ instance_type_id = instance['instance_type_id']
+ instance_type = db.instance_type_get(context, instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
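+ # local_gb is expressed in GiB; scale by 1024 ** 3 so the limit is
+ # compared in bytes against the VDI chain total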
+
+ LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
+
+ @classmethod
+ def _fetch_image_glance_disk(cls, context, session, instance, image,
image_type):
"""Fetch the image from Glance
@@ -489,6 +532,7 @@ class VMHelper(HelperBase):
Returns: A single filename if image_type is KERNEL_RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
+ instance_id = instance.id
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
@@ -807,6 +851,21 @@ def get_vhd_parent_uuid(session, vdi_ref):
return None
+def walk_vdi_chain(session, vdi_uuid):
+ """Yield vdi_recs for each element in a VDI chain"""
+ # TODO(jk0): perhaps make get_vhd_parent use this
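+ # Callers typically aggregate over the yielded records; e.g.
+ # _get_vdi_chain_size sums each record's physical_utilisation.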
+ while True:
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ yield vdi_rec
+
+ parent_uuid = vdi_rec['sm_config'].get('vhd-parent')
+ if parent_uuid:
+ vdi_uuid = parent_uuid
+ else:
+ break
+
+
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 1fefd1291..eb0a846b5 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -137,7 +137,7 @@ class VMOps(object):
def _create_disks(self, context, instance):
disk_image_type = VMHelper.determine_disk_image_type(instance)
vdis = VMHelper.fetch_image(context, self._session,
- instance.id, instance.image_ref,
+ instance, instance.image_ref,
instance.user_id, instance.project_id,
disk_image_type)
return vdis
@@ -182,11 +182,11 @@ class VMOps(object):
try:
if instance.kernel_id:
kernel = VMHelper.fetch_image(context, self._session,
- instance.id, instance.kernel_id, instance.user_id,
+ instance, instance.kernel_id, instance.user_id,
instance.project_id, ImageType.KERNEL)[0]
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(context, self._session,
- instance.id, instance.ramdisk_id, instance.user_id,
+ instance, instance.ramdisk_id, instance.user_id,
instance.project_id, ImageType.RAMDISK)[0]
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py
index 8c8fa35b8..ef73e6f4c 100644
--- a/smoketests/test_netadmin.py
+++ b/smoketests/test_netadmin.py
@@ -107,14 +107,18 @@ class AddressTests(base.UserSmokeTestCase):
class SecurityGroupTests(base.UserSmokeTestCase):
- def __public_instance_is_accessible(self):
- id_url = "latest/meta-data/instance-id"
+ def __get_metadata_item(self, category):
+ id_url = "latest/meta-data/%s" % category
options = "-f -s --max-time 1"
command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
status, output = commands.getstatusoutput(command)
- instance_id = output.strip()
+ value = output.strip()
if status > 0:
return False
+ return value
+
+ def __public_instance_is_accessible(self):
+ instance_id = self.__get_metadata_item('instance-id')
if not instance_id:
return False
if instance_id != self.data['instance'].id:
@@ -166,7 +170,14 @@ class SecurityGroupTests(base.UserSmokeTestCase):
finally:
result = self.conn.disassociate_address(self.data['public_ip'])
- def test_005_can_revoke_security_group_ingress(self):
+ def test_005_validate_metadata(self):
+
+ instance = self.data['instance']
+ self.assertEqual(instance.instance_type,
+ self.__get_metadata_item("instance-type"))
+ # FIXME(dprince): validate more metadata here
+
+ def test_006_can_revoke_security_group_ingress(self):
self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
ip_protocol='tcp',
from_port=80,