summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorJason Koelker <jason@koelker.net>2011-06-27 16:49:33 -0500
committerJason Koelker <jason@koelker.net>2011-06-27 16:49:33 -0500
commit06bc61dbe63182bfc3d95de0c7330ccdc1210379 (patch)
tree3b1f05118d773ae46a6ad2c129f6a789669dd43b /nova
parent42f97776424df69889b369d5fdd17653e5ac887b (diff)
parent0a2c2e0975c3037372b47b09a7f547eb197ef7d7 (diff)
merge with trey
Diffstat (limited to 'nova')
-rw-r--r--nova/api/ec2/admin.py65
-rw-r--r--nova/api/ec2/cloud.py4
-rw-r--r--nova/api/openstack/image_metadata.py64
-rw-r--r--nova/api/openstack/images.py45
-rw-r--r--nova/api/openstack/views/images.py30
-rw-r--r--nova/api/openstack/wsgi.py8
-rw-r--r--nova/compute/api.py20
-rw-r--r--nova/compute/manager.py89
-rw-r--r--nova/db/api.py57
-rw-r--r--nova/db/sqlalchemy/api.py149
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py73
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py75
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/028_multi_nic.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/023_multi_nic.py)4
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/029_fk_fixed_ips_virtual_interface_id.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/024_fk_fixed_ips_virtual_interface_id.py)0
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/029_sqlite_downgrade.sql (renamed from nova/db/sqlalchemy/migrate_repo/versions/024_sqlite_downgrade.sql)0
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/029_sqlite_update.sql (renamed from nova/db/sqlalchemy/migrate_repo/versions/024_sqlite_upgrade.sql)0
-rw-r--r--nova/db/sqlalchemy/models.py29
-rw-r--r--nova/image/glance.py2
-rw-r--r--nova/network/api.py2
-rw-r--r--nova/network/linux_net.py11
-rw-r--r--nova/network/manager.py76
-rw-r--r--nova/network/xenapi_net.py6
-rw-r--r--nova/tests/api/openstack/fakes.py7
-rw-r--r--nova/tests/api/openstack/test_image_metadata.py123
-rw-r--r--nova/tests/api/openstack/test_images.py41
-rw-r--r--nova/tests/api/openstack/test_limits.py3
-rw-r--r--nova/tests/api/openstack/test_servers.py2
-rw-r--r--nova/tests/api/openstack/test_wsgi.py4
-rw-r--r--nova/tests/test_adminapi.py107
-rw-r--r--nova/tests/test_cloud.py4
-rw-r--r--nova/tests/test_compute.py8
-rw-r--r--nova/tests/test_libvirt.py81
-rw-r--r--nova/tests/test_xenapi.py91
-rw-r--r--nova/utils.py10
-rw-r--r--nova/virt/driver.py8
-rw-r--r--nova/virt/fake.py31
-rw-r--r--nova/virt/libvirt/connection.py30
-rw-r--r--nova/virt/libvirt/firewall.py188
-rw-r--r--nova/virt/libvirt/netutils.py34
-rw-r--r--nova/virt/xenapi/vmops.py72
40 files changed, 1414 insertions, 239 deletions
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 57d0a0339..df7876b9d 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -21,7 +21,11 @@ Admin API controller, exposed through http via the api worker.
"""
import base64
+import datetime
+import netaddr
+import urllib
+from nova import compute
from nova import db
from nova import exception
from nova import flags
@@ -117,6 +121,9 @@ class AdminController(object):
def __str__(self):
return 'AdminController'
+ def __init__(self):
+ self.compute_api = compute.API()
+
def describe_instance_types(self, context, **_kwargs):
"""Returns all active instance types data (vcpus, memory, etc.)"""
return {'instanceTypeSet': [instance_dict(v) for v in
@@ -324,3 +331,61 @@ class AdminController(object):
rv.append(host_dict(host, compute, instances, volume, volumes,
now))
return {'hosts': rv}
+
+ def _provider_fw_rule_exists(self, context, rule):
+ # TODO(todd): we call this repeatedly, can we filter by protocol?
+ for old_rule in db.provider_fw_rule_get_all(context):
+ if all([rule[k] == old_rule[k] for k in ('cidr', 'from_port',
+ 'to_port', 'protocol')]):
+ return True
+ return False
+
+ def block_external_addresses(self, context, cidr):
+ """Add provider-level firewall rules to block incoming traffic."""
+ LOG.audit(_('Blocking traffic to all projects incoming from %s'),
+ cidr, context=context)
+ cidr = urllib.unquote(cidr).decode()
+ # raise if invalid
+ netaddr.IPNetwork(cidr)
+ rule = {'cidr': cidr}
+ tcp_rule = rule.copy()
+ tcp_rule.update({'protocol': 'tcp', 'from_port': 1, 'to_port': 65535})
+ udp_rule = rule.copy()
+ udp_rule.update({'protocol': 'udp', 'from_port': 1, 'to_port': 65535})
+ icmp_rule = rule.copy()
+ icmp_rule.update({'protocol': 'icmp', 'from_port': -1,
+ 'to_port': None})
+ rules_added = 0
+ if not self._provider_fw_rule_exists(context, tcp_rule):
+ db.provider_fw_rule_create(context, tcp_rule)
+ rules_added += 1
+ if not self._provider_fw_rule_exists(context, udp_rule):
+ db.provider_fw_rule_create(context, udp_rule)
+ rules_added += 1
+ if not self._provider_fw_rule_exists(context, icmp_rule):
+ db.provider_fw_rule_create(context, icmp_rule)
+ rules_added += 1
+ if not rules_added:
+ raise exception.ApiError(_('Duplicate rule'))
+ self.compute_api.trigger_provider_fw_rules_refresh(context)
+ return {'status': 'OK', 'message': 'Added %s rules' % rules_added}
+
+ def describe_external_address_blocks(self, context):
+ blocks = db.provider_fw_rule_get_all(context)
+ # NOTE(todd): use a set since we have icmp/udp/tcp rules with same cidr
+ blocks = set([b.cidr for b in blocks])
+ blocks = [{'cidr': b} for b in blocks]
+ return {'externalIpBlockInfo':
+ list(sorted(blocks, key=lambda k: k['cidr']))}
+
+ def remove_external_address_block(self, context, cidr):
+ LOG.audit(_('Removing ip block from %s'), cidr, context=context)
+ cidr = urllib.unquote(cidr).decode()
+ # raise if invalid
+ netaddr.IPNetwork(cidr)
+ rules = db.provider_fw_rule_get_all_by_cidr(context, cidr)
+ for rule in rules:
+ db.provider_fw_rule_destroy(context, rule['id'])
+ if rules:
+ self.compute_api.trigger_provider_fw_rules_refresh(context)
+ return {'status': 'OK', 'message': 'Deleted %s rules' % len(rules)}
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 078d35887..e7f94830f 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -23,7 +23,7 @@ datastore.
"""
import base64
-import IPy
+import netaddr
import os
import urllib
import tempfile
@@ -452,7 +452,7 @@ class CloudController(object):
elif cidr_ip:
# If this fails, it throws an exception. This is what we want.
cidr_ip = urllib.unquote(cidr_ip).decode()
- IPy.IP(cidr_ip)
+ netaddr.IPNetwork(cidr_ip)
values['cidr'] = cidr_ip
else:
values['cidr'] = '0.0.0.0/0'
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
index ebfe2bde9..c0e92f2fc 100644
--- a/nova/api/openstack/image_metadata.py
+++ b/nova/api/openstack/image_metadata.py
@@ -16,6 +16,7 @@
# under the License.
from webob import exc
+from xml.dom import minidom
from nova import flags
from nova import image
@@ -59,7 +60,7 @@ class Controller(object):
context = req.environ['nova.context']
metadata = self._get_metadata(context, image_id)
if id in metadata:
- return {id: metadata[id]}
+ return {'meta': {id: metadata[id]}}
else:
return faults.Fault(exc.HTTPNotFound())
@@ -77,15 +78,22 @@ class Controller(object):
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
- if not id in body:
+
+ try:
+ meta = body['meta']
+ except KeyError:
+ expl = _('Incorrect request body format')
+ raise exc.HTTPBadRequest(explanation=expl)
+
+ if not id in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
- if len(body) > 1:
+ if len(meta) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
img = self.image_service.show(context, image_id)
metadata = self._get_metadata(context, image_id, img)
- metadata[id] = body[id]
+ metadata[id] = meta[id]
self._check_quota_limit(context, metadata)
img['properties'] = metadata
self.image_service.update(context, image_id, img, None)
@@ -103,9 +111,55 @@ class Controller(object):
self.image_service.update(context, image_id, img, None)
+class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
+ def __init__(self):
+ xmlns = wsgi.XMLNS_V11
+ super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns)
+
+ def _meta_item_to_xml(self, doc, key, value):
+ node = doc.createElement('meta')
+ node.setAttribute('key', key)
+ text = doc.createTextNode(value)
+ node.appendChild(text)
+ return node
+
+ def _meta_list_to_xml(self, xml_doc, meta_items):
+ container_node = xml_doc.createElement('metadata')
+ for (key, value) in meta_items:
+ item_node = self._meta_item_to_xml(xml_doc, key, value)
+ container_node.appendChild(item_node)
+ return container_node
+
+ def _meta_list_to_xml_string(self, metadata_dict):
+ xml_doc = minidom.Document()
+ items = metadata_dict['metadata'].items()
+ container_node = self._meta_list_to_xml(xml_doc, items)
+ self._add_xmlns(container_node)
+ return container_node.toprettyxml(indent=' ')
+
+ def index(self, metadata_dict):
+ return self._meta_list_to_xml_string(metadata_dict)
+
+ def create(self, metadata_dict):
+ return self._meta_list_to_xml_string(metadata_dict)
+
+ def _meta_item_to_xml_string(self, meta_item_dict):
+ xml_doc = minidom.Document()
+ item_key, item_value = meta_item_dict.items()[0]
+ item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
+ self._add_xmlns(item_node)
+ return item_node.toprettyxml(indent=' ')
+
+ def show(self, meta_item_dict):
+ return self._meta_item_to_xml_string(meta_item_dict['meta'])
+
+ def update(self, meta_item_dict):
+ return self._meta_item_to_xml_string(meta_item_dict['meta'])
+
+
def create_resource():
serializers = {
- 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
+ 'application/xml': ImageMetadataXMLSerializer(),
}
return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 5ffd8e96a..d43340e10 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os.path
+
import webob.exc
from nova import compute
@@ -99,21 +101,27 @@ class Controller(object):
raise webob.exc.HTTPBadRequest()
try:
- server_id = self._server_id_from_req_data(body)
+ server_id = self._server_id_from_req(req, body)
image_name = body["image"]["name"]
except KeyError:
raise webob.exc.HTTPBadRequest()
- image = self._compute_service.snapshot(context, server_id, image_name)
+ props = self._get_extra_properties(req, body)
+
+ image = self._compute_service.snapshot(context, server_id,
+ image_name, props)
return dict(image=self.get_builder(req).build(image, detail=True))
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
raise NotImplementedError
- def _server_id_from_req_data(self, data):
+ def _server_id_from_req(self, req, data):
raise NotImplementedError()
+ def _get_extra_properties(self, req, data):
+ return {}
+
class ControllerV10(Controller):
"""Version 1.0 specific controller logic."""
@@ -149,8 +157,12 @@ class ControllerV10(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
- def _server_id_from_req_data(self, data):
- return data['image']['serverId']
+ def _server_id_from_req(self, req, data):
+ try:
+ return data['image']['serverId']
+ except KeyError:
+ msg = _("Expected serverId attribute on server entity.")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
class ControllerV11(Controller):
@@ -189,8 +201,27 @@ class ControllerV11(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
- def _server_id_from_req_data(self, data):
- return data['image']['serverRef']
+ def _server_id_from_req(self, req, data):
+ try:
+ server_ref = data['image']['serverRef']
+ except KeyError:
+ msg = _("Expected serverRef attribute on server entity.")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ head, tail = os.path.split(server_ref)
+
+ if head and head != os.path.join(req.application_url, 'servers'):
+ msg = _("serverRef must match request url")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ return tail
+
+ def _get_extra_properties(self, req, data):
+ server_ref = data['image']['serverRef']
+ if not server_ref.startswith('http'):
+ server_ref = os.path.join(req.application_url, 'servers',
+ server_ref)
+ return {'instance_ref': server_ref}
def create_resource(version='1.0'):
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index 2773c9c13..d6a054102 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -46,13 +46,9 @@ class ViewBuilder(object):
except KeyError:
image['status'] = image['status'].upper()
- def _build_server(self, image, instance_id):
+ def _build_server(self, image, image_obj):
"""Indicates that you must use a ViewBuilder subclass."""
- raise NotImplementedError
-
- def generate_server_ref(self, server_id):
- """Return an href string pointing to this server."""
- return os.path.join(self._url, "servers", str(server_id))
+ raise NotImplementedError()
def generate_href(self, image_id):
"""Return an href string pointing to this object."""
@@ -60,8 +56,6 @@ class ViewBuilder(object):
def build(self, image_obj, detail=False):
"""Return a standardized image structure for display by the API."""
- properties = image_obj.get("properties", {})
-
self._format_dates(image_obj)
if "status" in image_obj:
@@ -72,11 +66,7 @@ class ViewBuilder(object):
"name": image_obj.get("name"),
}
- if "instance_id" in properties:
- try:
- self._build_server(image, int(properties["instance_id"]))
- except ValueError:
- pass
+ self._build_server(image, image_obj)
if detail:
image.update({
@@ -94,15 +84,21 @@ class ViewBuilder(object):
class ViewBuilderV10(ViewBuilder):
"""OpenStack API v1.0 Image Builder"""
- def _build_server(self, image, instance_id):
- image["serverId"] = instance_id
+ def _build_server(self, image, image_obj):
+ try:
+ image['serverId'] = int(image_obj['properties']['instance_id'])
+ except (KeyError, ValueError):
+ pass
class ViewBuilderV11(ViewBuilder):
"""OpenStack API v1.1 Image Builder"""
- def _build_server(self, image, instance_id):
- image["serverRef"] = self.generate_server_ref(instance_id)
+ def _build_server(self, image, image_obj):
+ try:
+ image['serverRef'] = image_obj['properties']['instance_ref']
+ except KeyError:
+ return
def build(self, image_obj, detail=False):
"""Return a standardized image structure for display by the API."""
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index a57b7f72b..5d24b4cca 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -232,12 +232,14 @@ class XMLDictSerializer(DictSerializer):
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
- xmlns = node.getAttribute('xmlns')
- if not xmlns and self.xmlns:
- node.setAttribute('xmlns', self.xmlns)
+ self._add_xmlns(node)
return node.toprettyxml(indent=' ', encoding='utf-8')
+ def _add_xmlns(self, node):
+ if self.xmlns is not None:
+ node.setAttribute('xmlns', self.xmlns)
+
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 57740cbd8..4eff7bac0 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -161,6 +161,9 @@ class API(base.Base):
os_type = None
if 'properties' in image and 'os_type' in image['properties']:
os_type = image['properties']['os_type']
+ architecture = None
+ if 'properties' in image and 'arch' in image['properties']:
+ architecture = image['properties']['arch']
vm_mode = None
if 'properties' in image and 'vm_mode' in image['properties']:
vm_mode = image['properties']['vm_mode']
@@ -226,6 +229,7 @@ class API(base.Base):
'metadata': metadata,
'availability_zone': availability_zone,
'os_type': os_type,
+ 'architecture': architecture,
'vm_mode': vm_mode}
return (num_instances, base_options, security_groups)
@@ -474,6 +478,16 @@ class API(base.Base):
{"method": "refresh_security_group_members",
"args": {"security_group_id": group_id}})
+ def trigger_provider_fw_rules_refresh(self, context):
+ """Called when a rule is added to or removed from a security_group"""
+
+ hosts = [x['host'] for (x, idx)
+ in db.service_get_all_compute_sorted(context)]
+ for host in hosts:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {'method': 'refresh_provider_fw_rules', 'args': {}})
+
def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
@@ -679,7 +693,7 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)
- def snapshot(self, context, instance_id, name):
+ def snapshot(self, context, instance_id, name, extra_properties=None):
"""Snapshot the given instance.
:returns: A dict containing image metadata
@@ -687,6 +701,7 @@ class API(base.Base):
properties = {'instance_id': str(instance_id),
'user_id': str(context.user_id),
'image_state': 'creating'}
+ properties.update(extra_properties or {})
sent_meta = {'name': name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = self.image_service.create(context, sent_meta)
@@ -974,7 +989,8 @@ class API(base.Base):
"unable to associate floating ip") % instance_id
raise exception.ApiError(msg)
if len(fixed_ip_addrs) > 1:
- LOG.warning(_("multiple fixed_ips exist, using the first"))
+ LOG.warning(_("multiple fixed_ips exist, using the first: %s"),
+ fixed_ip_addrs[0])
self.network_api.associate_floating_ip(context,
floating_ip=address,
fixed_ip=fixed_ip_addrs[0])
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c0d8ecea2..102a3c5a7 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -201,6 +201,11 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
+ @exception.wrap_exception
+ def refresh_provider_fw_rules(self, context, **_kwargs):
+ """This call passes straight through to the virtualization driver."""
+ return self.driver.refresh_provider_fw_rules()
+
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
self.db.instance_set_state(context,
@@ -277,39 +282,45 @@ class ComputeManager(manager.SchedulerDependentManager):
'networking')
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
- # NOTE(vish): This could be a cast because we don't do anything
- # with the address currently, but I'm leaving it as
- # a call to ensure that network setup completes. We
- # will eventually also need to save the address here.
- if not FLAGS.stub_network:
- network_info = self.network_api.allocate_for_instance(context,
- instance,
- vpn=is_vpn)
- LOG.debug(_("instance network_info: |%s|"), network_info)
- self.network_manager.setup_compute_network(context, instance_id)
- else:
- # TODO(tr3buchet) not really sure how this should be handled.
- # virt requires network_info to be passed in but stub_network
- # is enabled. Setting to [] for now will cause virt to skip
- # all vif creation and network injection, maybe this is correct
- network_info = []
-
- block_device_mapping = self._setup_block_device_mapping(context,
- instance_id)
+ try:
+ # NOTE(vish): This could be a cast because we don't do anything
+ # with the address currently, but I'm leaving it as
+ # a call to ensure that network setup completes. We
+ # will eventually also need to save the address here.
+ if not FLAGS.stub_network:
+ network_info = self.network_api.allocate_for_instance(context,
+ instance, vpn=is_vpn)
+ LOG.debug(_("instance network_info: |%s|"), network_info)
+ self.network_manager.setup_compute_network(context,
+ instance_id)
+ else:
+ # TODO(tr3buchet) not really sure how this should be handled.
+ # virt requires network_info to be passed in but stub_network
+ # is enabled. Setting to [] for now will cause virt to skip
+ # all vif creation and network injection, maybe this is correct
+ network_info = []
- # TODO(vish) check to make sure the availability zone matches
- self._update_state(context, instance_id, power_state.BUILDING)
+ bd_mapping = self._setup_block_device_mapping(context, instance_id)
- try:
- self.driver.spawn(instance_ref, network_info, block_device_mapping)
- except Exception as ex: # pylint: disable=W0702
- msg = _("Instance '%(instance_id)s' failed to spawn. Is "
- "virtualization enabled in the BIOS? Details: "
- "%(ex)s") % locals()
- LOG.exception(msg)
+ # TODO(vish) check to make sure the availability zone matches
+ self._update_state(context, instance_id, power_state.BUILDING)
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
+ try:
+ self.driver.spawn(instance, network_info, bd_mapping)
+ except Exception as ex: # pylint: disable=W0702
+ msg = _("Instance '%(instance_id)s' failed to spawn. Is "
+ "virtualization enabled in the BIOS? Details: "
+ "%(ex)s") % locals()
+ LOG.exception(msg)
+
+ self._update_launched_at(context, instance_id)
+ self._update_state(context, instance_id)
+ except exception.InstanceNotFound:
+ # FIXME(wwolf): We are just ignoring InstanceNotFound
+ # exceptions here in case the instance was immediately
+ # deleted before it actually got created. This should
+ # be fixed once we have no-db-messaging
+ pass
@exception.wrap_exception
def run_instance(self, context, instance_id, **kwargs):
@@ -508,6 +519,24 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
+ def agent_update(self, context, instance_id, url, md5hash):
+ """Update agent running on an instance on this host."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ instance_id = instance_ref['id']
+ instance_state = instance_ref['state']
+ expected_state = power_state.RUNNING
+ if instance_state != expected_state:
+ LOG.warn(_('trying to update agent on a non-running '
+ 'instance: %(instance_id)s (state: %(instance_state)s '
+ 'expected: %(expected_state)s)') % locals())
+ nm = instance_ref['name']
+ msg = _('instance %(nm)s: updating agent to %(url)s') % locals()
+ LOG.audit(msg)
+ self.driver.agent_update(instance_ref, url, md5hash)
+
+ @exception.wrap_exception
+ @checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
context = context.elevated()
diff --git a/nova/db/api.py b/nova/db/api.py
index 935ce814d..b372ac6a7 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -410,6 +410,11 @@ def virtual_interface_create(context, values):
return IMPL.virtual_interface_create(context, values)
+def virtual_interface_update(context, vif_id, values):
+ """Update a virtual interface record in the database."""
+ return IMPL.virtual_interface_update(context, vif_id, values)
+
+
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table,"""
return IMPL.virtual_interface_get(context, vif_id)
@@ -1079,6 +1084,29 @@ def security_group_rule_destroy(context, security_group_rule_id):
###################
+def provider_fw_rule_create(context, rule):
+ """Add a firewall rule at the provider level (all hosts & instances)."""
+ return IMPL.provider_fw_rule_create(context, rule)
+
+
+def provider_fw_rule_get_all(context):
+ """Get all provider-level firewall rules."""
+ return IMPL.provider_fw_rule_get_all(context)
+
+
+def provider_fw_rule_get_all_by_cidr(context, cidr):
+ """Get all provider-level firewall rules."""
+ return IMPL.provider_fw_rule_get_all_by_cidr(context, cidr)
+
+
+def provider_fw_rule_destroy(context, rule_id):
+ """Delete a provider firewall rule from the database."""
+ return IMPL.provider_fw_rule_destroy(context, rule_id)
+
+
+###################
+
+
def user_get(context, id):
"""Get user by id."""
return IMPL.user_get(context, id)
@@ -1341,3 +1369,32 @@ def instance_metadata_delete(context, instance_id, key):
def instance_metadata_update_or_create(context, instance_id, metadata):
"""Create or update instance metadata."""
IMPL.instance_metadata_update_or_create(context, instance_id, metadata)
+
+
+####################
+
+
+def agent_build_create(context, values):
+ """Create a new agent build entry."""
+ return IMPL.agent_build_create(context, values)
+
+
+def agent_build_get_by_triple(context, hypervisor, os, architecture):
+ """Get agent build by hypervisor/OS/architecture triple."""
+ return IMPL.agent_build_get_by_triple(context, hypervisor, os,
+ architecture)
+
+
+def agent_build_get_all(context):
+ """Get all agent builds."""
+ return IMPL.agent_build_get_all(context)
+
+
+def agent_build_destroy(context, agent_update_id):
+ """Destroy agent build entry."""
+ IMPL.agent_build_destroy(context, agent_update_id)
+
+
+def agent_build_update(context, agent_build_id, values):
+ """Update agent build entry."""
+ IMPL.agent_build_update(context, agent_build_id, values)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 3cb35b649..d13efb874 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -675,7 +675,6 @@ def fixed_ip_disassociate(context, address):
address,
session=session)
fixed_ip_ref.instance = None
- fixed_ip_ref.virtual_interface = None
fixed_ip_ref.save(session=session)
@@ -691,7 +690,6 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
filter(models.FixedIp.instance_id != None).\
filter_by(allocated=0).\
update({'instance_id': None,
- 'virtual_interface_id': None,
'leased': 0,
'updated_at': utils.utcnow()},
synchronize_session='fetch')
@@ -702,7 +700,9 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
def fixed_ip_get_all(context, session=None):
if not session:
session = get_session()
- result = session.query(models.FixedIp).all()
+ result = session.query(models.FixedIp).\
+ options(joinedload('floating_ips')).\
+ all()
if not result:
raise exception.NoFixedIpsDefined()
@@ -714,10 +714,11 @@ def fixed_ip_get_all_by_host(context, host=None):
session = get_session()
result = session.query(models.FixedIp).\
- join(models.FixedIp.instance).\
- filter_by(state=1).\
- filter_by(host=host).\
- all()
+ options(joinedload('floating_ips')).\
+ join(models.FixedIp.instance).\
+ filter_by(state=1).\
+ filter_by(host=host).\
+ all()
if not result:
raise exception.NoFixedIpsDefinedForHost(host=host)
@@ -732,6 +733,7 @@ def fixed_ip_get_by_address(context, address, session=None):
result = session.query(models.FixedIp).\
filter_by(address=address).\
filter_by(deleted=can_read_deleted(context)).\
+ options(joinedload('floating_ips')).\
options(joinedload('network')).\
options(joinedload('instance')).\
first()
@@ -745,15 +747,10 @@ def fixed_ip_get_by_address(context, address, session=None):
@require_context
-def fixed_ip_get_instance(context, address):
- fixed_ip_ref = fixed_ip_get_by_address(context, address)
- return fixed_ip_ref.instance
-
-
-@require_context
def fixed_ip_get_by_instance(context, instance_id):
session = get_session()
rv = session.query(models.FixedIp).\
+ options(joinedload('floating_ips')).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
@@ -766,6 +763,7 @@ def fixed_ip_get_by_instance(context, instance_id):
def fixed_ip_get_by_virtual_interface(context, vif_id):
session = get_session()
rv = session.query(models.FixedIp).\
+ options(joinedload('floating_ips')).\
filter_by(virtual_interface_id=vif_id).\
filter_by(deleted=False).\
all()
@@ -775,6 +773,12 @@ def fixed_ip_get_by_virtual_interface(context, vif_id):
@require_context
+def fixed_ip_get_instance(context, address):
+ fixed_ip_ref = fixed_ip_get_by_address(context, address)
+ return fixed_ip_ref.instance
+
+
+@require_context
def fixed_ip_get_instance_v6(context, address):
session = get_session()
@@ -827,12 +831,29 @@ def virtual_interface_create(context, values):
@require_context
-def virtual_interface_get(context, vif_id):
+def virtual_interface_update(context, vif_id, values):
+ """Update a virtual interface record in the database.
+
+ :param vif_id: = id of virtual interface to update
+ :param values: = values to update
+ """
+ session = get_session()
+ with session.begin():
+ vif_ref = virtual_interface_get(context, vif_id, session=session)
+ vif_ref.update(values)
+ vif_ref.save(session=session)
+ return vif_ref
+
+
+@require_context
+def virtual_interface_get(context, vif_id, session=None):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
- session = get_session()
+ if not session:
+ session = get_session()
+
vif_ref = session.query(models.VirtualInterface).\
filter_by(id=vif_id).\
options(joinedload('network')).\
@@ -927,12 +948,9 @@ def virtual_interface_delete(context, vif_id):
:param vif_id: = id of vif to delete
"""
- vif_ref = virtual_interface_get(context, vif_id)
session = get_session()
+ vif_ref = virtual_interface_get(context, vif_id, session)
with session.begin():
- # disassociate any fixed_ips from this interface
- for fixed_ip in vif_ref['fixed_ips']:
- fixed_ip.virtual_interface = None
session.delete(vif_ref)
@@ -945,7 +963,7 @@ def virtual_interface_delete_by_instance(context, instance_id):
"""
vif_refs = virtual_interface_get_by_instance(context, instance_id)
for vif_ref in vif_refs:
- virtual_interface_delete(vif_ref['id'])
+ virtual_interface_delete(context, vif_ref['id'])
###################
@@ -1564,6 +1582,7 @@ def network_get_associated_fixed_ips(context, network_id):
options(joinedload_all('instance')).\
filter_by(network_id=network_id).\
filter(models.FixedIp.instance_id != None).\
+ filter(models.FixedIp.virtual_interface_id != None).\
filter_by(deleted=False).\
all()
@@ -2395,6 +2414,45 @@ def security_group_rule_destroy(context, security_group_rule_id):
@require_admin_context
+def provider_fw_rule_create(context, rule):
+ fw_rule_ref = models.ProviderFirewallRule()
+ fw_rule_ref.update(rule)
+ fw_rule_ref.save()
+ return fw_rule_ref
+
+
+@require_admin_context
+def provider_fw_rule_get_all(context):
+ session = get_session()
+ return session.query(models.ProviderFirewallRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_admin_context
+def provider_fw_rule_get_all_by_cidr(context, cidr):
+ session = get_session()
+ return session.query(models.ProviderFirewallRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(cidr=cidr).\
+ all()
+
+
+@require_admin_context
+def provider_fw_rule_destroy(context, rule_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.ProviderFirewallRule).\
+ filter_by(id=rule_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+###################
+
+
+@require_admin_context
def user_get(context, id, session=None):
if not session:
session = get_session()
@@ -3018,3 +3076,54 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
meta_ref.save(session=session)
return metadata
+
+
+@require_admin_context
+def agent_build_create(context, values):
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(values)
+ agent_build_ref.save()
+ return agent_build_ref
+
+
+@require_admin_context
+def agent_build_get_by_triple(context, hypervisor, os, architecture,
+ session=None):
+ if not session:
+ session = get_session()
+ return session.query(models.AgentBuild).\
+ filter_by(hypervisor=hypervisor).\
+ filter_by(os=os).\
+ filter_by(architecture=architecture).\
+ filter_by(deleted=False).\
+ first()
+
+
+@require_admin_context
+def agent_build_get_all(context):
+ session = get_session()
+ return session.query(models.AgentBuild).\
+ filter_by(deleted=False).\
+ all()
+
+
+@require_admin_context
+def agent_build_destroy(context, agent_build_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.AgentBuild).\
+ filter_by(id=agent_build_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_admin_context
+def agent_build_update(context, agent_build_id, values):
+ session = get_session()
+ with session.begin():
+ agent_build_ref = session.query(models.AgentBuild).\
+ filter_by(id=agent_build_id). \
+ first()
+ agent_build_ref.update(values)
+ agent_build_ref.save(session=session)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
new file mode 100644
index 000000000..640e96138
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
@@ -0,0 +1,73 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+from nova import log as logging
+
+meta = MetaData()
+
+#
+# New Tables
+#
+builds = Table('agent_builds', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('hypervisor',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('os',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('architecture',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('version',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('md5hash',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+#
+# New Column
+#
+
+architecture = Column('architecture', String(length=255))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (builds, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ # Add columns to existing tables
+ instances.create_column(architecture)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
new file mode 100644
index 000000000..5aa30f7a8
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+services = Table('services', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+networks = Table('networks', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+provider_fw_rules = Table('provider_fw_rules', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('protocol',
+ String(length=5, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('from_port', Integer()),
+ Column('to_port', Integer()),
+ Column('cidr',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (provider_fw_rules,):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/023_multi_nic.py b/nova/db/sqlalchemy/migrate_repo/versions/028_multi_nic.py
index 85ab1fdd8..48fb4032f 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/023_multi_nic.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/028_multi_nic.py
@@ -114,8 +114,8 @@ def upgrade(migrate_engine):
fixed_ips.c.instance_id != None)
for row in s.execute():
- m = select([virtual_interfaces.c.id].\
- where(virtual_interfaces.c.instance_id == row['instance_id'])).\
+ m = select([virtual_interfaces.c.id]).\
+ where(virtual_interfaces.c.instance_id == row['instance_id']).\
as_scalar()
u = fixed_ips.update().values(virtual_interface_id=m).\
where(fixed_ips.c.id == row['id'])
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_fk_fixed_ips_virtual_interface_id.py b/nova/db/sqlalchemy/migrate_repo/versions/029_fk_fixed_ips_virtual_interface_id.py
index 56e927717..56e927717 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/024_fk_fixed_ips_virtual_interface_id.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/029_fk_fixed_ips_virtual_interface_id.py
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/029_sqlite_downgrade.sql
index c1d26b180..c1d26b180 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/024_sqlite_downgrade.sql
+++ b/nova/db/sqlalchemy/migrate_repo/versions/029_sqlite_downgrade.sql
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/029_sqlite_upgrade.sql
index 2a9362545..2a9362545 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/024_sqlite_upgrade.sql
+++ b/nova/db/sqlalchemy/migrate_repo/versions/029_sqlite_upgrade.sql
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index ae2d2c23b..7f02ac532 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -232,6 +232,7 @@ class Instance(BASE, NovaBase):
locked = Column(Boolean)
os_type = Column(String(255))
+ architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36))
@@ -492,6 +493,17 @@ class SecurityGroupIngressRule(BASE, NovaBase):
group_id = Column(Integer, ForeignKey('security_groups.id'))
+class ProviderFirewallRule(BASE, NovaBase):
+    """Represents a provider-level firewall rule."""
+ __tablename__ = 'provider_fw_rules'
+ id = Column(Integer, primary_key=True)
+
+ protocol = Column(String(5)) # "tcp", "udp", or "icmp"
+ from_port = Column(Integer)
+ to_port = Column(Integer)
+ cidr = Column(String(255))
+
+
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
@@ -584,7 +596,10 @@ class FixedIp(BASE, NovaBase):
primaryjoin='and_('
'FixedIp.instance_id == Instance.id,'
'FixedIp.deleted == False)')
+ # associated means that a fixed_ip has its instance_id column set
+    # allocated means that a fixed_ip has its virtual_interface_id column set
allocated = Column(Boolean, default=False)
+ # leased means dhcp bridge has leased the ip
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
@@ -729,6 +744,18 @@ class Zone(BASE, NovaBase):
password = Column(String(255))
+class AgentBuild(BASE, NovaBase):
+ """Represents an agent build."""
+ __tablename__ = 'agent_builds'
+ id = Column(Integer, primary_key=True)
+ hypervisor = Column(String(255))
+ os = Column(String(255))
+ architecture = Column(String(255))
+ version = Column(String(255))
+ url = Column(String(255))
+ md5hash = Column(String(255))
+
+
def register_models():
"""Register Models and create metadata.
@@ -742,7 +769,7 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
- InstanceMetadata, Migration)
+ AgentBuild, InstanceMetadata, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 6e058ab2f..55d948a32 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -59,7 +59,7 @@ class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format',
- 'container_format']
+ 'container_format', 'checksum']
# NOTE(sirp): Overriding to use _translate_to_service provided by
# BaseImageService
diff --git a/nova/network/api.py b/nova/network/api.py
index a43e76d2a..39d468a92 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -107,7 +107,7 @@ class API(base.Base):
return
if not floating_ip.get('fixed_ip'):
raise exception.ApiError('Address is not associated.')
- host = floating_ip['host']
+ host = floating_ip['fixed_ip']['network']['host']
rpc.call(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
{'method': 'disassociate_floating_ip',
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 4b998fbba..283a5aca1 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -20,6 +20,7 @@
import calendar
import inspect
+import netaddr
import os
from nova import db
@@ -27,7 +28,6 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
-from IPy import IP
LOG = logging.getLogger("nova.linux_net")
@@ -191,6 +191,13 @@ class IptablesTable(object):
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
+ def empty_chain(self, chain, wrap=True):
+ """Remove all rules from a chain."""
+ chained_rules = [rule for rule in self.rules
+ if rule.chain == chain and rule.wrap == wrap]
+ for rule in chained_rules:
+ self.rules.remove(rule)
+
class IptablesManager(object):
"""Wrapper for iptables.
@@ -700,7 +707,7 @@ def _dnsmasq_cmd(net):
'--listen-address=%s' % net['gateway'],
'--except-interface=lo',
'--dhcp-range=%s,static,120s' % net['dhcp_start'],
- '--dhcp-lease-max=%s' % IP(net['cidr']).len(),
+ '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
diff --git a/nova/network/manager.py b/nova/network/manager.py
index f51738643..b60e70990 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -40,17 +40,18 @@ topologies. All of the network commands are issued to a subclass of
is disassociated
:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
is disassociated
+:create_unique_mac_address_attempts: Number of times to attempt creating
+ a unique mac address
"""
import datetime
import math
+import netaddr
import socket
import pickle
from eventlet import greenpool
-import IPy
-
from nova import context
from nova import db
from nova import exception
@@ -101,6 +102,8 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False,
'Whether to update dhcp when fixed_ip is disassociated')
flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
'Seconds after which a deallocated ip is disassociated')
+flags.DEFINE_integer('create_unique_mac_address_attempts', 5,
+ 'Number of attempts to create unique mac address')
flags.DEFINE_bool('use_ipv6', False,
'use the ipv6')
@@ -158,8 +161,12 @@ class FloatingIP(object):
"""Configures floating ips owned by host."""
admin_context = context.get_admin_context()
- floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
- self.host)
+ try:
+ floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
+ self.host)
+ except exception.NotFound:
+ return
+
for floating_ip in floating_ips:
if floating_ip.get('fixed_ip', None):
fixed_address = floating_ip['fixed_ip']['address']
@@ -375,13 +382,13 @@ class NetworkManager(manager.SchedulerDependentManager):
self.db.fixed_ip_get_by_instance(context, instance_id)
LOG.debug(_("network deallocation for instance |%s|"), instance_id,
context=context)
- # deallocate mac addresses
- self.db.virtual_interface_delete_by_instance(context, instance_id)
-
# deallocate fixed ips
for fixed_ip in fixed_ips:
self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs)
+ # deallocate vifs (mac addresses)
+ self.db.virtual_interface_delete_by_instance(context, instance_id)
+
def get_instance_nw_info(self, context, instance_id, instance_type_id):
"""Creates network info list for instance.
@@ -423,6 +430,8 @@ class NetworkManager(manager.SchedulerDependentManager):
network_dict = {
'bridge': network['bridge'],
'id': network['id'],
+ 'cidr': network['cidr'],
+ 'cidr_v6': network['cidr_v6'],
'injected': network['injected']}
info = {
'label': network['label'],
@@ -446,8 +455,8 @@ class NetworkManager(manager.SchedulerDependentManager):
vif = {'address': self.generate_mac_address(),
'instance_id': instance_id,
'network_id': network['id']}
- # try 5 times to create a vif record with a unique mac_address
- for i in range(5):
+ # try FLAG times to create a vif record with a unique mac_address
+ for i in range(FLAGS.create_unique_mac_address_attempts):
try:
self.db.virtual_interface_create(context, vif)
break
@@ -490,41 +499,37 @@ class NetworkManager(manager.SchedulerDependentManager):
def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
- self.db.fixed_ip_update(context, address, {'allocated': False})
+ self.db.fixed_ip_update(context, address,
+ {'allocated': False,
+ 'virtual_interface_id': None})
def lease_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is leased."""
- LOG.debug(_('Leasing IP %s'), address, context=context)
+ LOG.debug(_('Leased IP |%(address)s| to mac |%(mac)s|'), locals(),
+ context=context)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
instance = fixed_ip['instance']
if not instance:
raise exception.Error(_('IP %s leased that is not associated') %
address)
- mac_address = fixed_ip['virtual_interface']['address']
- if mac_address != mac:
- raise exception.Error(_('IP %(address)s leased to bad'
- ' mac %(mac_address)s vs %(mac)s') % locals())
now = utils.utcnow()
self.db.fixed_ip_update(context,
fixed_ip['address'],
{'leased': True,
'updated_at': now})
if not fixed_ip['allocated']:
- LOG.warn(_('IP %s leased that was already deallocated'), address,
+ LOG.warn(_('IP |%s| leased that isn\'t allocated'), address,
context=context)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
- LOG.debug(_('Releasing IP %s'), address, context=context)
+ LOG.debug(_('Released IP |%(address)s| from mac |%(mac)s|'), locals(),
+ context=context)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
instance = fixed_ip['instance']
if not instance:
raise exception.Error(_('IP %s released that is not associated') %
address)
- mac_address = fixed_ip['virtual_interface']['address']
- if mac_address != mac:
- raise exception.Error(_('IP %(address)s released from'
- ' bad mac %(mac_address)s vs %(mac)s') % locals())
if not fixed_ip['leased']:
LOG.warn(_('IP %s released that was not leased'), address,
context=context)
@@ -544,8 +549,8 @@ class NetworkManager(manager.SchedulerDependentManager):
network_size, cidr_v6, gateway_v6, bridge,
bridge_interface, **kwargs):
"""Create networks based on parameters."""
- fixed_net = IPy.IP(cidr)
- fixed_net_v6 = IPy.IP(cidr_v6)
+ fixed_net = netaddr.IPNetwork(cidr)
+ fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
significant_bits_v6 = 64
network_size_v6 = 1 << 64
for index in range(num_networks):
@@ -553,16 +558,16 @@ class NetworkManager(manager.SchedulerDependentManager):
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
cidr = '%s/%s' % (fixed_net[start], significant_bits)
- project_net = IPy.IP(cidr)
+ project_net = netaddr.IPNetwork(cidr)
net = {}
net['bridge'] = bridge
net['bridge_interface'] = bridge_interface
net['dns'] = FLAGS.flat_network_dns
net['cidr'] = cidr
- net['netmask'] = str(project_net.netmask())
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast())
- net['dhcp_start'] = str(project_net[2])
+ net['netmask'] = str(project_net.netmask)
+ net['gateway'] = str(list(project_net)[1])
+ net['broadcast'] = str(project_net.broadcast)
+ net['dhcp_start'] = str(list(project_net)[2])
if num_networks > 1:
net['label'] = '%s_%d' % (label, index)
else:
@@ -572,15 +577,16 @@ class NetworkManager(manager.SchedulerDependentManager):
cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
- project_net_v6 = IPy.IP(cidr_v6)
+
+ project_net_v6 = netaddr.IPNetwork(cidr_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
- net['gateway_v6'] = str(gateway_v6)
+                net['gateway_v6'] = str(gateway_v6)
else:
- net['gateway_v6'] = str(project_net_v6[1])
+ net['gateway_v6'] = str(list(project_net_v6)[1])
- net['netmask_v6'] = str(project_net_v6.prefixlen())
+            net['netmask_v6'] = str(project_net_v6.prefixlen)
if kwargs.get('vpn', False):
# this bit here is for vlan-manager
@@ -621,7 +627,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# to properties of the manager class?
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
- project_net = IPy.IP(network['cidr'])
+ project_net = netaddr.IPNetwork(network['cidr'])
num_ips = len(project_net)
for index in range(num_ips):
address = str(project_net[index])
@@ -839,8 +845,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
' than 4094'))
# check that num networks and network size fits in fixed_net
- fixed_net = IPy.IP(kwargs['cidr'])
- if fixed_net.len() < kwargs['num_networks'] * kwargs['network_size']:
+ fixed_net = netaddr.IPNetwork(kwargs['cidr'])
+ if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']:
raise ValueError(_('The network range is not big enough to fit '
'%(num_networks)s. Network size is %(network_size)s') %
kwargs)
diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py
index 34a598ead..e86f4017d 100644
--- a/nova/network/xenapi_net.py
+++ b/nova/network/xenapi_net.py
@@ -56,8 +56,10 @@ def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
'other_config': {}}
network_ref = session.call_xenapi('network.create', network_rec)
# 2 - find PIF for VLAN
- expr = "field 'device' = '%s' and \
- field 'VLAN' = '-1'" % bridge_interface
+ # NOTE(salvatore-orlando): using double quotes inside single quotes
+ # as xapi filter only support tokens in double quotes
+ expr = 'field "device" = "%s" and \
+ field "VLAN" = "-1"' % bridge_interface
pifs = session.call_xenapi('PIF.get_all_records_where', expr)
pif_ref = None
# Multiple PIF are ok: we are dealing with a pool
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index f8d158ddd..c74974b16 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -140,9 +140,10 @@ def stub_out_networking(stubs):
def stub_out_compute_api_snapshot(stubs):
- def snapshot(self, context, instance_id, name):
- return dict(id='123', status='ACTIVE',
- properties=dict(instance_id='123'))
+ def snapshot(self, context, instance_id, name, extra_properties=None):
+ props = dict(instance_id=instance_id, instance_ref=instance_id)
+ props.update(extra_properties or {})
+ return dict(id='123', status='ACTIVE', name=name, properties=props)
stubs.Set(nova.compute.API, 'snapshot', snapshot)
diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
index 56be0f1cc..730af3665 100644
--- a/nova/tests/api/openstack/test_image_metadata.py
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -19,6 +19,7 @@ import json
import stubout
import unittest
import webob
+import xml.dom.minidom as minidom
from nova import flags
@@ -37,6 +38,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image1',
'deleted': False,
'container_format': None,
+ 'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -52,6 +54,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image2',
'deleted': False,
'container_format': None,
+ 'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -67,6 +70,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image3',
'deleted': False,
'container_format': None,
+ 'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -103,7 +107,34 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
- self.assertEqual('value1', res_dict['metadata']['key1'])
+ expected = self.IMAGE_FIXTURES[0]['properties']
+ self.assertEqual(len(expected), len(res_dict['metadata']))
+ for (key, value) in res_dict['metadata'].items():
+            self.assertEqual(value, expected[key])
+
+ def test_index_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+ output = serializer.index(fixture)
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="three">
+ four
+ </meta>
+ <meta key="one">
+ two
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
def test_show(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
@@ -111,13 +142,32 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
- self.assertEqual('value1', res_dict['key1'])
+ self.assertTrue('meta' in res_dict)
+ self.assertEqual(len(res_dict['meta']), 1)
+ self.assertEqual('value1', res_dict['meta']['key1'])
+
+ def test_show_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.show(fixture)
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
def test_show_not_found(self):
req = webob.Request.blank('/v1.1/images/1/meta/key9')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_create(self):
@@ -135,22 +185,79 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual('value2', res_dict['metadata']['key2'])
self.assertEqual(1, len(res_dict))
+ def test_create_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'key9': 'value9',
+ 'key2': 'value2',
+ 'key1': 'value1',
+ },
+ }
+ output = serializer.create(fixture)
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="key2">
+ value2
+ </meta>
+ <meta key="key9">
+ value9
+ </meta>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
def test_update_item(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
- req.body = '{"key1": "zz"}'
+ req.body = '{"meta": {"key1": "zz"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
- self.assertEqual('zz', res_dict['key1'])
+ self.assertTrue('meta' in res_dict)
+ self.assertEqual(len(res_dict['meta']), 1)
+ self.assertEqual('zz', res_dict['meta']['key1'])
+
+ def test_update_item_bad_body(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "zz"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.update(fixture)
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
def test_update_item_too_many_keys(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
- req.body = '{"key1": "value1", "key2": "value2"}'
+ req.body = '{"meta": {"key1": "value1", "key2": "value2"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
@@ -159,7 +266,7 @@ class ImageMetaDataTest(unittest.TestCase):
req = webob.Request.blank('/v1.1/images/1/meta/bad')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
- req.body = '{"key1": "value1"}'
+ req.body = '{"meta": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
@@ -195,7 +302,7 @@ class ImageMetaDataTest(unittest.TestCase):
req = webob.Request.blank('/v1.1/images/3/meta/blah')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
- req.body = '{"blah": "blah"}'
+ req.body = '{"meta": {"blah": "blah"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index e4204809f..446d68e9e 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -618,7 +618,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 124,
'name': 'queued backup',
- 'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -626,7 +625,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 125,
'name': 'saving backup',
- 'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -635,7 +633,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 126,
'name': 'active backup',
- 'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE'
@@ -643,7 +640,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 127,
'name': 'killed backup',
- 'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -689,7 +685,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 124,
'name': 'queued backup',
- 'serverRef': "http://localhost/v1.1/servers/42",
+ 'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -711,7 +707,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 125,
'name': 'saving backup',
- 'serverRef': "http://localhost/v1.1/servers/42",
+ 'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -734,7 +730,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 126,
'name': 'active backup',
- 'serverRef': "http://localhost/v1.1/servers/42",
+ 'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -756,7 +752,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 127,
'name': 'killed backup',
- 'serverRef': "http://localhost/v1.1/servers/42",
+ 'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -1002,6 +998,30 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
+ def test_create_image_v1_1_actual_server_ref(self):
+
+ serverRef = 'http://localhost/v1.1/servers/1'
+ body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+ result = json.loads(response.body)
+ self.assertEqual(result['image']['serverRef'], serverRef)
+
+ def test_create_image_v1_1_server_ref_bad_hostname(self):
+
+ serverRef = 'http://asdf/v1.1/servers/1'
+ body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
def test_create_image_v1_1_xml_serialization(self):
body = dict(image=dict(serverRef='123', name='Backup 1'))
@@ -1018,7 +1038,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
<image
created="None"
id="123"
- name="None"
+ name="Backup 1"
serverRef="http://localhost/v1.1/servers/123"
status="ACTIVE"
updated="None"
@@ -1065,7 +1085,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_id += 1
# Backup for User 1
- backup_properties = {'instance_id': '42', 'user_id': '1'}
+ server_ref = 'http://localhost:8774/v1.1/servers/42'
+ backup_properties = {'instance_ref': server_ref, 'user_id': '1'}
for status in ('queued', 'saving', 'active', 'killed'):
add_fixture(id=image_id, name='%s backup' % status,
is_public=False, status=status,
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index 01613d1d8..38c959fae 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -672,8 +672,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
"""Only POSTs should work."""
requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
- request = webob.Request.blank("/")
- request.body = self._request_data("GET", "/something")
+ request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 405)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index be9ecab1f..e318b7c57 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -1504,7 +1504,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 400)
def test_resized_server_has_correct_status(self):
- req = self.webreq('/1', 'GET', dict(resize=dict(flavorId=3)))
+ req = self.webreq('/1', 'GET')
def fake_migration_get(*args):
return {}
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index 2fa50ac9b..73a26a087 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -10,13 +10,13 @@ from nova.api.openstack import wsgi
class RequestTest(test.TestCase):
def test_content_type_missing(self):
- request = wsgi.Request.blank('/tests/123')
+ request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_unsupported(self):
- request = wsgi.Request.blank('/tests/123')
+ request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(exception.InvalidContentType,
diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py
new file mode 100644
index 000000000..877cf4ea1
--- /dev/null
+++ b/nova/tests/test_adminapi.py
@@ -0,0 +1,107 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.api.ec2 import admin
+from nova.image import fake
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.adminapi')
+
+
+class AdminApiTestCase(test.TestCase):
+ def setUp(self):
+ super(AdminApiTestCase, self).setUp()
+ self.flags(connection_type='fake')
+
+ self.conn = rpc.Connection.instance()
+
+ # set up our cloud
+ self.api = admin.AdminController()
+
+ # set up services
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.volume = self.start_service('volume')
+ self.image_service = utils.import_object(FLAGS.image_service)
+
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('admin', 'admin', 'admin', True)
+ self.project = self.manager.create_project('proj', 'admin', 'proj')
+ self.context = context.RequestContext(user=self.user,
+ project=self.project)
+
+ def fake_show(meh, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine', 'image_state': 'available'}}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
+
+ # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
+ rpc_cast = rpc.cast
+
+ def finish_cast(*args, **kwargs):
+ rpc_cast(*args, **kwargs)
+ greenthread.sleep(0.2)
+
+ self.stubs.Set(rpc, 'cast', finish_cast)
+
+ def tearDown(self):
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ super(AdminApiTestCase, self).tearDown()
+
+ def test_block_external_ips(self):
+ """Make sure provider firewall rules are created."""
+ result = self.api.block_external_addresses(self.context, '1.1.1.1/32')
+ self.api.remove_external_address_block(self.context, '1.1.1.1/32')
+ self.assertEqual('OK', result['status'])
+ self.assertEqual('Added 3 rules', result['message'])
+
+ def test_list_blocked_ips(self):
+ """Make sure we can see the external blocks that exist."""
+ self.api.block_external_addresses(self.context, '1.1.1.2/32')
+ result = self.api.describe_external_address_blocks(self.context)
+ num = len(db.provider_fw_rule_get_all(self.context))
+ self.api.remove_external_address_block(self.context, '1.1.1.2/32')
+ # we only list IP, not tcp/udp/icmp rules
+ self.assertEqual(num / 3, len(result['externalIpBlockInfo']))
+
+ def test_remove_ip_block(self):
+ """Remove ip blocks."""
+ result = self.api.block_external_addresses(self.context, '1.1.1.3/32')
+ self.assertEqual('OK', result['status'])
+ num0 = len(db.provider_fw_rule_get_all(self.context))
+ result = self.api.remove_external_address_block(self.context,
+ '1.1.1.3/32')
+ self.assertEqual('OK', result['status'])
+ self.assertEqual('Deleted 3 rules', result['message'])
+ num1 = len(db.provider_fw_rule_get_all(self.context))
+ self.assert_(num1 < num0)
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 968fc4986..8b90f361c 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -637,6 +637,7 @@ class CloudTestCase(test.TestCase):
elevated = self.context.elevated(read_deleted=True)
self._wait_for_state(elevated, instance_id, is_deleted)
+ @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_start_instance(self):
"""Makes sure stop/start instance works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
@@ -692,6 +693,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
+ @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_start_with_volume(self):
"""Make sure run instance with block device mapping works"""
@@ -760,6 +762,7 @@ class CloudTestCase(test.TestCase):
self._restart_compute_service()
+ @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_with_attached_volume(self):
"""Make sure attach info is reflected to block device mapping"""
# enforce periodic tasks run in short time to avoid wait for 60s.
@@ -835,6 +838,7 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)
return result['snapshotId']
+ @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_run_with_snapshot(self):
"""Makes sure run/stop/start instance with snapshot works."""
vol = self._volume_create()
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index dc270c345..9f3dc29cb 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -280,6 +280,14 @@ class ComputeTestCase(test.TestCase):
"File Contents")
self.compute.terminate_instance(self.context, instance_id)
+ def test_agent_update(self):
+ """Ensure instance can have its agent updated"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.agent_update(self.context, instance_id,
+ 'http://127.0.0.1/agent', '00112233445566778899aabbccddeeff')
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 43ab406a0..5a081ddf7 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -54,12 +54,12 @@ def _create_network_info(count=1, ipv6=None):
fake_ip = '0.0.0.0/0'
fake_ip_2 = '0.0.0.1/0'
fake_ip_3 = '0.0.0.1/0'
- network = {'gateway': fake,
- 'gateway_v6': fake,
- 'bridge': fake,
+ network = {'bridge': fake,
'cidr': fake_ip,
'cidr_v6': fake_ip}
mapping = {'mac': fake,
+ 'gateway': fake,
+ 'gateway6': fake,
'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
if ipv6:
mapping['ip6s'] = [{'ip': fake_ip},
@@ -757,8 +757,8 @@ class LibvirtConnTestCase(test.TestCase):
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
- network = db.project_get_network(context.get_admin_context(),
- self.project.id)
+ network = db.project_get_networks(context.get_admin_context(),
+ self.project.id)[0]
ip_dict = {'ip': self.test_ip,
'netmask': network['netmask'],
'enabled': '1'}
@@ -821,7 +821,9 @@ class IptablesFirewallTestCase(test.TestCase):
self.network = utils.import_object(FLAGS.network_manager)
class FakeLibvirtConnection(object):
- pass
+ def nwfilterDefineXML(*args, **kwargs):
+ """setup_basic_rules in nwfilter calls this."""
+ pass
self.fake_libvirt_connection = FakeLibvirtConnection()
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
@@ -1051,6 +1053,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake")
+ @test.skip_test("skip libvirt test project_get_network no longer exists")
def test_unfilter_instance_undefines_nwfilter(self):
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
@@ -1063,7 +1066,6 @@ class IptablesFirewallTestCase(test.TestCase):
fakefilter.filterDefineXMLMock
self.fw.nwfilter._conn.nwfilterLookupByName =\
fakefilter.nwfilterLookupByName
-
instance_ref = self._create_instance_ref()
inst_id = instance_ref['id']
instance = db.instance_get(self.context, inst_id)
@@ -1085,6 +1087,71 @@ class IptablesFirewallTestCase(test.TestCase):
db.instance_destroy(admin_ctxt, instance_ref['id'])
+ @test.skip_test("skip libvirt test project_get_network no longer exists")
+ def test_provider_firewall_rules(self):
+ # setup basic instance data
+ instance_ref = self._create_instance_ref()
+ nw_info = _create_network_info(1)
+ ip = '10.11.12.13'
+ network_ref = db.project_get_network(self.context, 'fake')
+ admin_ctxt = context.get_admin_context()
+ fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': instance_ref['id']})
+ # FRAGILE: peeks at how the firewall names chains
+ chain_name = 'inst-%s' % instance_ref['id']
+
+ # create a firewall via setup_basic_filtering like libvirt_conn.spawn
+ # should have a chain with 0 rules
+ self.fw.setup_basic_filtering(instance_ref, network_info=nw_info)
+ self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(0, len(rules))
+
+ # add a rule and send the update message, check for 1 rule
+ provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
+ {'protocol': 'tcp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535})
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
+ # Add another, refresh, and make sure number of rules goes to two
+ provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
+ {'protocol': 'udp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535})
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(2, len(rules))
+
+ # create the instance filter and make sure it has a jump rule
+ self.fw.prepare_instance_filter(instance_ref, network_info=nw_info)
+ self.fw.apply_instance_filter(instance_ref)
+ inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == chain_name]
+ jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
+ provjump_rules = []
+ # IptablesTable doesn't make rules unique internally
+ for rule in jump_rules:
+ if 'provider' in rule.rule and rule not in provjump_rules:
+ provjump_rules.append(rule)
+ self.assertEqual(1, len(provjump_rules))
+
+ # remove a rule from the db, cast to compute to refresh rule
+ db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
class NWFilterTestCase(test.TestCase):
def setUp(self):
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 68469f1f7..af7f7f338 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -37,9 +37,8 @@ from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils
-from nova.virt.xenapi.vmops import SimpleDH
-from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
@@ -84,7 +83,8 @@ class XenAPIVolumeTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
@@ -191,7 +191,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
- self.stubs.Set(VMOps, 'reset_network', reset_network)
+ self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
@@ -210,7 +210,8 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
@@ -382,7 +383,8 @@ class XenAPIVMTestCase(test.TestCase):
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
- instance_id=1, check_injection=False, create_record=True):
+ architecture="x86-64", instance_id=1,
+ check_injection=False):
stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id,
'project_id': self.project.id,
@@ -391,28 +393,28 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id,
- 'os_type': os_type}
- if create_record:
- instance = db.instance_create(self.context, values)
- network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
- {'broadcast': '192.168.0.255',
- 'dns': ['192.168.0.1'],
- 'gateway': '192.168.0.1',
- 'gateway6': 'dead:beef::1',
- 'ip6s': [{'enabled': '1',
- 'ip': 'dead:beef::dcad:beff:feef:0',
- 'netmask': '64'}],
- 'ips': [{'enabled': '1',
- 'ip': '192.168.0.100',
- 'netmask': '255.255.255.0'}],
- 'label': 'fake',
- 'mac': 'DE:AD:BE:EF:00:00',
- 'rxtx_cap': 3})]
- self.conn.spawn(instance, network_info)
- else:
- instance = db.instance_get(self.context, instance_id)
+ 'os_type': os_type,
+ 'architecture': architecture}
+ instance = db.instance_create(self.context, values)
+ network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
+ {'broadcast': '192.168.0.255',
+ 'dns': ['192.168.0.1'],
+ 'gateway': '192.168.0.1',
+ 'gateway6': 'dead:beef::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': 'dead:beef::dcad:beff:feef:0',
+ 'netmask': '64'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.0.100',
+ 'netmask': '255.255.255.0'}],
+ 'label': 'fake',
+ 'mac': 'DE:AD:BE:EF:00:00',
+ 'rxtx_cap': 3})]
+ self.conn.spawn(instance, network_info)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
+ self.assertTrue(instance.os_type)
+ self.assertTrue(instance.architecture)
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
@@ -437,7 +439,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
- os_type="linux")
+ os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self):
@@ -466,7 +468,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
- os_type="windows")
+ os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
@@ -630,7 +632,8 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
@@ -654,8 +657,8 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
- self.alice = SimpleDH()
- self.bob = SimpleDH()
+ self.alice = vmops.SimpleDH()
+ self.bob = vmops.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
@@ -718,7 +721,8 @@ class XenAPIMigrateInstance(test.TestCase):
'ramdisk_id': None,
'local_gb': 5,
'instance_type_id': '3', # m1.large
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
@@ -772,6 +776,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
+ self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(
@@ -816,6 +821,28 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
+class CompareVersionTestCase(test.TestCase):
+ def test_less_than(self):
+ """Test that cmp_version compares a as less than b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
+
+ def test_greater_than(self):
+ """Test that cmp_version compares a as greater than b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
+
+ def test_equal(self):
+ """Test that cmp_version compares a as equal to b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
+
+ def test_non_lexical(self):
+ """Test that cmp_version compares non-lexically"""
+ self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
+
+ def test_length(self):
+ """Test that cmp_version compares by length as last resort"""
+ self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
+
+
class FakeXenApi(object):
"""Fake XenApi for testing HostState."""
diff --git a/nova/utils.py b/nova/utils.py
index b49849f9a..4a66dd993 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -518,6 +518,16 @@ def loads(s):
return json.loads(s)
+try:
+ import anyjson
+except ImportError:
+ pass
+else:
+ anyjson._modules.append(("nova.utils", "dumps", TypeError,
+ "loads", ValueError))
+ anyjson.force_implementation("nova.utils")
+
+
_semaphores = {}
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 5c3d677fc..1c9797973 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -191,6 +191,10 @@ class ComputeDriver(object):
def refresh_security_group_members(self, security_group_id):
raise NotImplementedError()
+ def refresh_provider_fw_rules(self, security_group_id):
+ """See: nova/virt/fake.py for docs."""
+ raise NotImplementedError()
+
def reset_network(self, instance):
"""reset networking for specified instance"""
raise NotImplementedError()
@@ -234,6 +238,10 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def agent_update(self, instance, url, md5hash):
+ """Update agent on the VM instance."""
+ raise NotImplementedError()
+
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance"""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 584ad32b3..5fe9d674f 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -225,6 +225,21 @@ class FakeConnection(driver.ComputeDriver):
"""
pass
+ def agent_update(self, instance, url, md5hash):
+ """
+ Update agent on the specified instance.
+
+ The first parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name. The second
+ parameter is the URL of the agent to be fetched and updated on the
+ instance; the third is the md5 hash of the file for verification
+ purposes.
+
+ The work will be done asynchronously. This function returns a
+ task that allows the caller to detect when it is complete.
+ """
+ pass
+
def rescue(self, instance):
"""
Rescue the specified instance.
@@ -451,6 +466,22 @@ class FakeConnection(driver.ComputeDriver):
"""
return True
+ def refresh_provider_fw_rules(self):
+ """This triggers a firewall update based on database changes.
+
+ When this is called, rules have either been added or removed from the
+ datastore. You can retrieve rules with
+ :method:`nova.db.api.provider_fw_rule_get_all`.
+
+ Provider rules take precedence over security group rules. If an IP
+ would be allowed by a security group ingress rule, but blocked by
+ a provider rule, then packets from the IP are dropped. This includes
+ intra-project traffic in the case of the allow_project_net_traffic
+ flag for the libvirt-derived classes.
+
+ """
+ pass
+
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 96ef92825..785a83584 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -38,6 +38,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
import hashlib
import multiprocessing
+import netaddr
import os
import random
import re
@@ -53,8 +54,6 @@ from xml.etree import ElementTree
from eventlet import greenthread
from eventlet import tpool
-import IPy
-
from nova import context
from nova import db
from nova import exception
@@ -771,8 +770,6 @@ class LibvirtConnection(driver.ComputeDriver):
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
network_info=None, block_device_mapping=None):
block_device_mapping = block_device_mapping or []
- if not network_info:
- network_info = netutils.get_network_info(inst)
if not suffix:
suffix = ''
@@ -881,18 +878,20 @@ class LibvirtConnection(driver.ComputeDriver):
have_injected_networks = True
address = mapping['ips'][0]['ip']
+ netmask = mapping['ips'][0]['netmask']
address_v6 = None
if FLAGS.use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
+ netmask_v6 = mapping['ip6s'][0]['netmask']
net_info = {'name': 'eth%d' % ifc_num,
'address': address,
- 'netmask': network_ref['netmask'],
- 'gateway': network_ref['gateway'],
- 'broadcast': network_ref['broadcast'],
- 'dns': network_ref['dns'],
+ 'netmask': netmask,
+ 'gateway': mapping['gateway'],
+ 'broadcast': mapping['broadcast'],
+ 'dns': mapping['dns'],
'address_v6': address_v6,
- 'gateway_v6': network_ref['gateway_v6'],
- 'netmask_v6': network_ref['netmask_v6']}
+ 'gateway6': mapping['gateway6'],
+ 'netmask_v6': netmask_v6}
nets.append(net_info)
if have_injected_networks:
@@ -928,8 +927,8 @@ class LibvirtConnection(driver.ComputeDriver):
def _get_nic_for_xml(self, network, mapping):
# Assume that the gateway also acts as the dhcp server.
- dhcp_server = network['gateway']
- gateway_v6 = network['gateway_v6']
+ dhcp_server = mapping['gateway']
+ gateway6 = mapping.get('gateway6')
mac_id = mapping['mac'].replace(':', '')
if FLAGS.allow_project_net_traffic:
@@ -955,8 +954,8 @@ class LibvirtConnection(driver.ComputeDriver):
'extra_params': extra_params,
}
- if gateway_v6:
- result['gateway_v6'] = gateway_v6 + "/128"
+ if gateway6:
+ result['gateway6'] = gateway6 + "/128"
return result
@@ -1383,6 +1382,9 @@ class LibvirtConnection(driver.ComputeDriver):
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
+ def refresh_provider_fw_rules(self):
+ self.firewall_driver.refresh_provider_fw_rules()
+
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 84153fa1e..379197398 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -76,6 +76,15 @@ class FirewallDriver(object):
the security group."""
raise NotImplementedError()
+ def refresh_provider_fw_rules(self):
+ """Refresh common rules for all hosts/instances from data store.
+
+ Gets called when a rule has been added to or removed from
+ the list of rules (via admin api).
+
+ """
+ raise NotImplementedError()
+
def setup_basic_filtering(self, instance, network_info=None):
"""Create rules to block spoofing and allow dhcp.
@@ -207,6 +216,13 @@ class NWFilterFirewall(FirewallDriver):
[base_filter]))
def _ensure_static_filters(self):
+ """Static filters are filters that have no need to be IP aware.
+
+ There is no configuration or tunability of these filters, so they
+ can be set up once and forgotten about.
+
+ """
+
if self.static_filters_configured:
return
@@ -310,19 +326,21 @@ class NWFilterFirewall(FirewallDriver):
'for %(instance_name)s is not found.') % locals())
def prepare_instance_filter(self, instance, network_info=None):
- """
- Creates an NWFilter for the given instance. In the process,
- it makes sure the filters for the security groups as well as
- the base filter are all in place.
+ """Creates an NWFilter for the given instance.
+
+ In the process, it makes sure the filters for the provider blocks,
+ security groups, and base filter are all in place.
+
"""
if not network_info:
network_info = netutils.get_network_info(instance)
+ self.refresh_provider_fw_rules()
+
ctxt = context.get_admin_context()
instance_secgroup_filter_name = \
'%s-secgroup' % (self._instance_filter_name(instance))
- #% (instance_filter_name,)
instance_secgroup_filter_children = ['nova-base-ipv4',
'nova-base-ipv6',
@@ -366,7 +384,7 @@ class NWFilterFirewall(FirewallDriver):
for (_n, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
- instance_filter_children = [base_filter,
+ instance_filter_children = [base_filter, 'nova-provider-rules',
instance_secgroup_filter_name]
if FLAGS.allow_project_net_traffic:
@@ -388,6 +406,19 @@ class NWFilterFirewall(FirewallDriver):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
+ def refresh_provider_fw_rules(self):
+ """Update rules for all instances.
+
+ This is part of the FirewallDriver API and is called when the
+ provider firewall rules change in the database. In the
+ `prepare_instance_filter` we add a reference to the
+ 'nova-provider-rules' filter for each instance's firewall, and
+ by changing that filter we update them all.
+
+ """
+ xml = self.provider_fw_to_nwfilter_xml()
+ return self._define_filter(xml)
+
def security_group_to_nwfilter_xml(self, security_group_id):
security_group = db.security_group_get(context.get_admin_context(),
security_group_id)
@@ -426,6 +457,43 @@ class NWFilterFirewall(FirewallDriver):
xml += "chain='ipv4'>%s</filter>" % rule_xml
return xml
+ def provider_fw_to_nwfilter_xml(self):
+ """Compose a filter of drop rules from specified cidrs."""
+ rule_xml = ""
+ v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
+ rules = db.provider_fw_rule_get_all(context.get_admin_context())
+ for rule in rules:
+ rule_xml += "<rule action='block' direction='in' priority='150'>"
+ version = netutils.get_ip_version(rule.cidr)
+ if(FLAGS.use_ipv6 and version == 6):
+ net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (v6protocol[rule.protocol], net, prefixlen)
+ else:
+ net, mask = netutils.get_net_and_mask(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (rule.protocol, net, mask)
+ if rule.protocol in ['tcp', 'udp']:
+ rule_xml += "dstportstart='%s' dstportend='%s' " % \
+ (rule.from_port, rule.to_port)
+ elif rule.protocol == 'icmp':
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
+ if rule.from_port != -1:
+ rule_xml += "type='%s' " % rule.from_port
+ if rule.to_port != -1:
+ rule_xml += "code='%s' " % rule.to_port
+
+ rule_xml += '/>\n'
+ rule_xml += "</rule>\n"
+ xml = "<filter name='nova-provider-rules' "
+ if(FLAGS.use_ipv6):
+ xml += "chain='root'>%s</filter>" % rule_xml
+ else:
+ xml += "chain='ipv4'>%s</filter>" % rule_xml
+ return xml
+
def _instance_filter_name(self, instance, nic_id=None):
if not nic_id:
return 'nova-instance-%s' % (instance['name'])
@@ -453,6 +521,7 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables = linux_net.iptables_manager
self.instances = {}
self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+ self.basicly_filtered = False
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
@@ -460,10 +529,14 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def setup_basic_filtering(self, instance, network_info=None):
- """Use NWFilter from libvirt for this."""
+ """Set up provider rules and basic NWFilter."""
if not network_info:
network_info = netutils.get_network_info(instance)
- return self.nwfilter.setup_basic_filtering(instance, network_info)
+ self.nwfilter.setup_basic_filtering(instance, network_info)
+ if not self.basicly_filtered:
+ LOG.debug(_('iptables firewall: Setup Basic Filtering'))
+ self.refresh_provider_fw_rules()
+ self.basicly_filtered = True
def apply_instance_filter(self, instance):
"""No-op. Everything is done in prepare_instance_filter"""
@@ -543,7 +616,11 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
- dhcp_servers = [network['gateway'] for (network, _m) in network_info]
+ # Pass through provider-wide drops
+ ipv4_rules += ['-j $provider']
+ ipv6_rules += ['-j $provider']
+
+ dhcp_servers = [info['gateway'] for (_n, info) in network_info]
for dhcp_server in dhcp_servers:
ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
@@ -560,7 +637,7 @@ class IptablesFirewallDriver(FirewallDriver):
# they're not worth the clutter.
if FLAGS.use_ipv6:
# Allow RA responses
- gateways_v6 = [network['gateway_v6'] for (network, _) in
+ gateways_v6 = [mapping['gateway6'] for (_n, mapping) in
network_info]
for gateway_v6 in gateways_v6:
ipv6_rules.append(
@@ -568,8 +645,8 @@ class IptablesFirewallDriver(FirewallDriver):
#Allow project network traffic
if FLAGS.allow_project_net_traffic:
- cidrv6s = [network['cidr_v6'] for (network, _m)
- in network_info]
+ cidrv6s = [network['cidr_v6'] for (network, _m) in
+ network_info]
for cidrv6 in cidrv6s:
ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
@@ -583,7 +660,7 @@ class IptablesFirewallDriver(FirewallDriver):
security_group['id'])
for rule in rules:
- logging.info('%r', rule)
+ LOG.debug(_('Adding security group rule: %r'), rule)
if not rule.cidr:
# Eventually, a mechanism to grant access for security
@@ -592,9 +669,9 @@ class IptablesFirewallDriver(FirewallDriver):
version = netutils.get_ip_version(rule.cidr)
if version == 4:
- rules = ipv4_rules
+ fw_rules = ipv4_rules
else:
- rules = ipv6_rules
+ fw_rules = ipv6_rules
protocol = rule.protocol
if version == 6 and rule.protocol == 'icmp':
@@ -629,7 +706,7 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type_arg]
args += ['-j ACCEPT']
- rules += [' '.join(args)]
+ fw_rules += [' '.join(args)]
ipv4_rules += ['-j $sg-fallback']
ipv6_rules += ['-j $sg-fallback']
@@ -657,6 +734,85 @@ class IptablesFirewallDriver(FirewallDriver):
network_info = netutils.get_network_info(instance)
self.add_filters_for_instance(instance, network_info)
+ def refresh_provider_fw_rules(self):
+ """See :class:`FirewallDriver` docs."""
+ self._do_refresh_provider_fw_rules()
+ self.iptables.apply()
+
+ @utils.synchronized('iptables', external=True)
+ def _do_refresh_provider_fw_rules(self):
+ """Internal, synchronized version of refresh_provider_fw_rules."""
+ self._purge_provider_fw_rules()
+ self._build_provider_fw_rules()
+
+ def _purge_provider_fw_rules(self):
+ """Remove all rules from the provider chains."""
+ self.iptables.ipv4['filter'].empty_chain('provider')
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].empty_chain('provider')
+
+ def _build_provider_fw_rules(self):
+ """Create all rules for the provider IP DROPs."""
+ self.iptables.ipv4['filter'].add_chain('provider')
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain('provider')
+ ipv4_rules, ipv6_rules = self._provider_rules()
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule('provider', rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule('provider', rule)
+
+ def _provider_rules(self):
+ """Generate a list of rules from provider for IPv4 & IPv6."""
+ ctxt = context.get_admin_context()
+ ipv4_rules = []
+ ipv6_rules = []
+ rules = db.provider_fw_rule_get_all(ctxt)
+ for rule in rules:
+ LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
+ version = netutils.get_ip_version(rule['cidr'])
+ if version == 4:
+ fw_rules = ipv4_rules
+ else:
+ fw_rules = ipv6_rules
+
+ protocol = rule['protocol']
+ if version == 6 and protocol == 'icmp':
+ protocol = 'icmpv6'
+
+ args = ['-p', protocol, '-s', rule['cidr']]
+
+ if protocol in ['udp', 'tcp']:
+ if rule['from_port'] == rule['to_port']:
+ args += ['--dport', '%s' % (rule['from_port'],)]
+ else:
+ args += ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule['from_port'],
+ rule['to_port'])]
+ elif protocol == 'icmp':
+ icmp_type = rule['from_port']
+ icmp_code = rule['to_port']
+
+ if icmp_type == -1:
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == -1:
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ if version == 4:
+ args += ['-m', 'icmp', '--icmp-type',
+ icmp_type_arg]
+ elif version == 6:
+ args += ['-m', 'icmp6', '--icmpv6-type',
+ icmp_type_arg]
+ args += ['-j DROP']
+ fw_rules += [' '.join(args)]
+ return ipv4_rules, ipv6_rules
+
def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,)
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py
index c8c2dbc67..e5aaf7cec 100644
--- a/nova/virt/libvirt/netutils.py
+++ b/nova/virt/libvirt/netutils.py
@@ -21,7 +21,7 @@
"""Network-releated utilities for supporting libvirt connection code."""
-import IPy
+import netaddr
from nova import context
from nova import db
@@ -34,45 +34,51 @@ FLAGS = flags.FLAGS
def get_net_and_mask(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.netmask())
+ net = netaddr.IPNetwork(cidr)
+ return str(net.ip), str(net.netmask)
def get_net_and_prefixlen(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.prefixlen())
+ net = netaddr.IPNetwork(cidr)
+ return str(net.ip), str(net._prefixlen)
def get_ip_version(cidr):
- net = IPy.IP(cidr)
- return int(net.version())
+ net = netaddr.IPNetwork(cidr)
+ return int(net.version)
def get_network_info(instance):
+ # TODO(tr3buchet): this function needs to go away! network info
+ # MUST be passed down from compute
# TODO(adiantum) If we will keep this function
# we should cache network_info
admin_context = context.get_admin_context()
- ip_addresses = db.fixed_ip_get_by_instance(admin_context, instance['id'])
+ fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id'])
+ vifs = db.virtual_interface_get_by_instance(admin_context, instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
flavor = db.instance_type_get_by_id(admin_context,
instance['instance_type_id'])
network_info = []
- for network in networks:
- network_ips = [ip for ip in ip_addresses
- if ip['network_id'] == network['id']]
+ for vif in vifs:
+ network = vif['network']
+
+ # determine which of the instance's IPs belong to this network
+ network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if
+ fixed_ip['network_id'] == network['id']]
def ip_dict(ip):
return {
- 'ip': ip['address'],
+ 'ip': ip,
'netmask': network['netmask'],
'enabled': '1'}
def ip6_dict():
prefix = network['cidr_v6']
- mac = instance['mac_address']
+ mac = vif['address']
project_id = instance['project_id']
return {
'ip': ipv6.to_global(prefix, mac, project_id),
@@ -83,7 +89,7 @@ def get_network_info(instance):
'label': network['label'],
'gateway': network['gateway'],
'broadcast': network['broadcast'],
- 'mac': instance['mac_address'],
+ 'mac': vif['address'],
'rxtx_cap': flavor['rxtx_cap'],
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_ips]}
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 668c68c64..6ef4bc4c2 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -47,6 +47,21 @@ LOG = logging.getLogger("nova.virt.xenapi.vmops")
FLAGS = flags.FLAGS
+def cmp_version(a, b):
+ """Compare two version strings (e.g. 0.0.1.10 > 0.0.1.9)"""
+ a = a.split('.')
+ b = b.split('.')
+
+ # Compare each individual portion of both version strings
+ for va, vb in zip(a, b):
+ ret = int(va) - int(vb)
+ if ret:
+ return ret
+
+ # Fallback to comparing length last
+ return len(a) - len(b)
+
+
class VMOps(object):
"""
Management class for VM-related tasks
@@ -214,6 +229,34 @@ class VMOps(object):
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
+ ctx = context.get_admin_context()
+ agent_build = db.agent_build_get_by_triple(ctx, 'xen',
+ instance.os_type, instance.architecture)
+ if agent_build:
+ LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s' + \
+ '/%(architecture)s is %(version)s') % agent_build)
+ else:
+ LOG.info(_('No agent build found for %(hypervisor)s/%(os)s' + \
+ '/%(architecture)s') % {
+ 'hypervisor': 'xen',
+ 'os': instance.os_type,
+ 'architecture': instance.architecture})
+
+ def _check_agent_version():
+ version = self.get_agent_version(instance)
+ if not version:
+ LOG.info(_('No agent version returned by instance'))
+ return
+
+ LOG.info(_('Instance agent version: %s') % version)
+ if not agent_build:
+ return
+
+ if cmp_version(version, agent_build['version']) < 0:
+ LOG.info(_('Updating Agent to %s') % agent_build['version'])
+ self.agent_update(instance, agent_build['url'],
+ agent_build['md5hash'])
+
def _inject_files():
injected_files = instance.injected_files
if injected_files:
@@ -248,6 +291,7 @@ class VMOps(object):
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
+ _check_agent_version()
_inject_files()
_set_admin_password()
return True
@@ -454,6 +498,34 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
self._session.wait_for_task(task, instance.id)
+ def get_agent_version(self, instance):
+ """Get the version of the agent running on the VM instance."""
+
+ # Query the agent for its version
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id}
+ resp = self._make_agent_call('version', instance, '', args)
+ if resp is None:
+ # No response from the agent
+ return
+ resp_dict = json.loads(resp)
+ return resp_dict['message']
+
+ def agent_update(self, instance, url, md5sum):
+ """Update agent on the VM instance."""
+
+ # Tell the agent to update itself from the given URL, verified by md5sum
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id, 'url': url, 'md5sum': md5sum}
+ resp = self._make_agent_call('agentupdate', instance, '', args)
+ if resp is None:
+ # No response from the agent
+ return
+ resp_dict = json.loads(resp)
+ if resp_dict['returncode'] != '0':
+ raise RuntimeError(resp_dict['message'])
+ return resp_dict['message']
+
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance.