Diffstat (limited to 'nova')
-rw-r--r--  nova/__init__.py  2
-rw-r--r--  nova/adminclient.py  63
-rw-r--r--  nova/api/ec2/admin.py  111
-rw-r--r--  nova/api/ec2/apirequest.py  8
-rw-r--r--  nova/api/ec2/cloud.py  25
-rw-r--r--  nova/api/openstack/__init__.py  1
-rw-r--r--  nova/api/openstack/auth.py  11
-rw-r--r--  nova/api/openstack/backup_schedules.py  1
-rw-r--r--  nova/api/openstack/images.py  2
-rw-r--r--  nova/api/openstack/servers.py  115
-rw-r--r--  nova/api/openstack/shared_ip_groups.py  2
-rw-r--r--  nova/api/openstack/zones.py  1
-rw-r--r--  nova/compute/api.py  37
-rw-r--r--  nova/compute/instance_types.py  2
-rw-r--r--  nova/compute/manager.py  12
-rw-r--r--  nova/console/manager.py  2
-rw-r--r--  nova/console/xvp.py  2
-rw-r--r--  nova/db/api.py  21
-rw-r--r--  nova/db/sqlalchemy/api.py  37
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py  78
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py  72
-rw-r--r--  nova/db/sqlalchemy/models.py  21
-rw-r--r--  nova/flags.py  52
-rw-r--r--  nova/log.py  134
-rw-r--r--  nova/network/manager.py  172
-rw-r--r--  nova/quota.py  14
-rw-r--r--  nova/rpc.py  13
-rw-r--r--  nova/service.py  45
-rw-r--r--  nova/test.py  63
-rw-r--r--  nova/tests/__init__.py  25
-rw-r--r--  nova/tests/api/openstack/__init__.py  4
-rw-r--r--  nova/tests/api/openstack/fakes.py  17
-rw-r--r--  nova/tests/api/openstack/test_adminapi.py  11
-rw-r--r--  nova/tests/api/openstack/test_api.py  4
-rw-r--r--  nova/tests/api/openstack/test_auth.py  46
-rw-r--r--  nova/tests/api/openstack/test_common.py  5
-rw-r--r--  nova/tests/api/openstack/test_faults.py  4
-rw-r--r--  nova/tests/api/openstack/test_flavors.py  10
-rw-r--r--  nova/tests/api/openstack/test_images.py  14
-rw-r--r--  nova/tests/api/openstack/test_ratelimiting.py  15
-rw-r--r--  nova/tests/api/openstack/test_servers.py  69
-rw-r--r--  nova/tests/api/openstack/test_shared_ip_groups.py  7
-rw-r--r--  nova/tests/api/openstack/test_zones.py  10
-rw-r--r--  nova/tests/api/test_wsgi.py  6
-rw-r--r--  nova/tests/fake_flags.py  6
-rw-r--r--  nova/tests/glance/stubs.py  40
-rw-r--r--  nova/tests/objectstore_unittest.py  1
-rw-r--r--  nova/tests/test_api.py  23
-rw-r--r--  nova/tests/test_auth.py  9
-rw-r--r--  nova/tests/test_cloud.py  116
-rw-r--r--  nova/tests/test_compute.py  8
-rw-r--r--  nova/tests/test_console.py  2
-rw-r--r--  nova/tests/test_direct.py  2
-rw-r--r--  nova/tests/test_localization.py  1
-rw-r--r--  nova/tests/test_log.py  63
-rw-r--r--  nova/tests/test_misc.py  2
-rw-r--r--  nova/tests/test_network.py  11
-rw-r--r--  nova/tests/test_quota.py  25
-rw-r--r--  nova/tests/test_scheduler.py  102
-rw-r--r--  nova/tests/test_service.py  7
-rw-r--r--  nova/tests/test_test.py  40
-rw-r--r--  nova/tests/test_utils.py  174
-rw-r--r--  nova/tests/test_virt.py  6
-rw-r--r--  nova/tests/test_xenapi.py  72
-rw-r--r--  nova/tests/xenapi/stubs.py  12
-rw-r--r--  nova/twistd.py  2
-rw-r--r--  nova/utils.py  53
-rw-r--r--  nova/virt/disk.py  8
-rw-r--r--  nova/virt/fake.py  4
-rw-r--r--  nova/virt/xenapi/vm_utils.py  228
-rw-r--r--  nova/virt/xenapi/vmops.py  218
-rw-r--r--  nova/virt/xenapi_conn.py  6
-rw-r--r--  nova/volume/driver.py  179
-rw-r--r--  nova/volume/manager.py  8
-rw-r--r--  nova/volume/san.py  312
-rw-r--r--  nova/wsgi.py  8
76 files changed, 2335 insertions, 779 deletions
diff --git a/nova/__init__.py b/nova/__init__.py
index 8745617bc..256db55a9 100644
--- a/nova/__init__.py
+++ b/nova/__init__.py
@@ -30,5 +30,3 @@
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
-
-from exception import *
diff --git a/nova/adminclient.py b/nova/adminclient.py
index c614b274c..fc3c5c5fe 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -23,6 +23,8 @@ import base64
import boto
import boto.exception
import httplib
+import re
+import string
from boto.ec2.regioninfo import RegionInfo
@@ -165,19 +167,20 @@ class HostInfo(object):
**Fields Include**
- * Disk stats
- * Running Instances
- * Memory stats
- * CPU stats
- * Network address info
- * Firewall info
- * Bridge and devices
-
+ * Hostname
+ * Compute service status
+ * Volume service status
+ * Instance count
+ * Volume count
"""
def __init__(self, connection=None):
self.connection = connection
self.hostname = None
+ self.compute = None
+ self.volume = None
+ self.instance_count = 0
+ self.volume_count = 0
def __repr__(self):
return 'Host:%s' % self.hostname
@@ -188,7 +191,39 @@ class HostInfo(object):
# this is needed by the sax parser, so ignore the ugly name
def endElement(self, name, value, connection):
- setattr(self, name, value)
+ fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name))
+ setattr(self, fixed_name, value)
+
+
+class Vpn(object):
+ """
+ Information about a Vpn, as parsed through SAX
+
+ **Fields Include**
+
+ * instance_id
+ * project_id
+ * public_ip
+ * public_port
+ * created_at
+ * internal_ip
+ * state
+ """
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.instance_id = None
+ self.project_id = None
+
+ def __repr__(self):
+ return 'Vpn:%s:%s' % (self.project_id, self.instance_id)
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name))
+ setattr(self, fixed_name, value)
class InstanceType(object):
@@ -422,6 +457,16 @@ class NovaAdminClient(object):
zip = self.apiconn.get_object('GenerateX509ForUser', params, UserInfo)
return zip.file
+ def start_vpn(self, project):
+ """
+ Starts the vpn for a user
+ """
+ return self.apiconn.get_object('StartVpn', {'Project': project}, Vpn)
+
+ def get_vpns(self):
+        """Return a list of VPNs with their project names."""
+ return self.apiconn.get_list('DescribeVpns', {}, [('item', Vpn)])
+
def get_hosts(self):
return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
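
For reference, the camelCase-to-underscore conversion used by the new endElement handlers above can be exercised on its own. This is a minimal sketch reusing the same regex, not additional client code:

import re
import string


def _to_underscore(name):
    # Same transformation as HostInfo.endElement / Vpn.endElement above:
    # insert an underscore before each capital letter, then lowercase.
    return string.lower(re.sub(r'([A-Z])', r'_\1', name))

assert _to_underscore('publicPort') == 'public_port'
assert _to_underscore('instanceId') == 'instance_id'
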
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 735951082..e2a05fce1 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -21,14 +21,18 @@ Admin API controller, exposed through http via the api worker.
"""
import base64
+import datetime
from nova import db
from nova import exception
+from nova import flags
from nova import log as logging
+from nova import utils
from nova.auth import manager
from nova.compute import instance_types
+FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.ec2.admin')
@@ -55,12 +59,25 @@ def project_dict(project):
return {}
-def host_dict(host):
+def host_dict(host, compute_service, instances, volume_service, volumes, now):
"""Convert a host model object to a result dict"""
- if host:
- return host.state
- else:
- return {}
+    rv = {'hostname': host, 'instance_count': len(instances),
+ 'volume_count': len(volumes)}
+ if compute_service:
+ latest = compute_service['updated_at'] or compute_service['created_at']
+ delta = now - latest
+ if delta.seconds <= FLAGS.service_down_time:
+ rv['compute'] = 'up'
+ else:
+ rv['compute'] = 'down'
+ if volume_service:
+ latest = volume_service['updated_at'] or volume_service['created_at']
+ delta = now - latest
+ if delta.seconds <= FLAGS.service_down_time:
+ rv['volume'] = 'up'
+ else:
+ rv['volume'] = 'down'
+ return rv
def instance_dict(name, inst):
@@ -71,6 +88,25 @@ def instance_dict(name, inst):
'flavor_id': inst['flavorid']}
+def vpn_dict(project, vpn_instance):
+ rv = {'project_id': project.id,
+ 'public_ip': project.vpn_ip,
+ 'public_port': project.vpn_port}
+ if vpn_instance:
+ rv['instance_id'] = vpn_instance['ec2_id']
+ rv['created_at'] = utils.isotime(vpn_instance['created_at'])
+ address = vpn_instance.get('fixed_ip', None)
+ if address:
+ rv['internal_ip'] = address['address']
+ if utils.vpn_ping(project.vpn_ip, project.vpn_port):
+ rv['state'] = 'running'
+ else:
+ rv['state'] = 'down'
+ else:
+ rv['state'] = 'pending'
+ return rv
+
+
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
@@ -223,19 +259,68 @@ class AdminController(object):
raise exception.ApiError(_('operation must be add or remove'))
return True
+ def _vpn_for(self, context, project_id):
+ """Get the VPN instance for a project ID."""
+ for instance in db.instance_get_all_by_project(context, project_id):
+ if (instance['image_id'] == FLAGS.vpn_image_id
+ and not instance['state_description'] in
+ ['shutting_down', 'shutdown']):
+ return instance
+
+ def start_vpn(self, context, project):
+ instance = self._vpn_for(context, project)
+ if not instance:
+ # NOTE(vish) import delayed because of __init__.py
+ from nova.cloudpipe import pipelib
+ pipe = pipelib.CloudPipe()
+ try:
+ pipe.launch_vpn_instance(project)
+ except db.NoMoreNetworks:
+ raise exception.ApiError("Unable to claim IP for VPN instance"
+ ", ensure it isn't running, and try "
+ "again in a few minutes")
+ instance = self._vpn_for(context, project)
+ return {'instance_id': instance['ec2_id']}
+
+ def describe_vpns(self, context):
+ vpns = []
+ for project in manager.AuthManager().get_projects():
+ instance = self._vpn_for(context, project.id)
+ vpns.append(vpn_dict(project, instance))
+ return {'items': vpns}
+
# FIXME(vish): these host commands don't work yet, perhaps some of the
# required data can be retrieved from service objects?
- def describe_hosts(self, _context, **_kwargs):
+ def describe_hosts(self, context, **_kwargs):
"""Returns status info for all nodes. Includes:
- * Disk Space
- * Instance List
- * RAM used
- * CPU used
- * DHCP servers running
- * Iptables / bridges
+ * Hostname
+ * Compute (up, down, None)
+ * Instance count
+ * Volume (up, down, None)
+ * Volume Count
"""
- return {'hostSet': [host_dict(h) for h in db.host_get_all()]}
+ services = db.service_get_all(context)
+ now = datetime.datetime.utcnow()
+ hosts = []
+ rv = []
+ for host in [service['host'] for service in services]:
+ if not host in hosts:
+ hosts.append(host)
+ for host in hosts:
+ compute = [s for s in services if s['host'] == host \
+ and s['binary'] == 'nova-compute']
+ if compute:
+ compute = compute[0]
+ instances = db.instance_get_all_by_host(context, host)
+ volume = [s for s in services if s['host'] == host \
+ and s['binary'] == 'nova-volume']
+ if volume:
+ volume = volume[0]
+ volumes = db.volume_get_all_by_host(context, host)
+ rv.append(host_dict(host, compute, instances, volume, volumes,
+ now))
+ return {'hosts': rv}
def describe_host(self, _context, name, **_kwargs):
"""Returns status info for single node."""
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 00b527d62..2b1acba5a 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -46,6 +46,11 @@ def _underscore_to_xmlcase(str):
return res[:1].lower() + res[1:]
+def _database_to_isoformat(datetimeobj):
+    """Return an xs:dateTime parsable string from a datetime."""
+ return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
def _try_convert(value):
"""Return a non-string if possible"""
if value == 'None':
@@ -173,7 +178,8 @@ class APIRequest(object):
elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower()))
elif isinstance(data, datetime.datetime):
- data_el.appendChild(xml.createTextNode(data.isoformat()))
+ data_el.appendChild(
+ xml.createTextNode(_database_to_isoformat(data)))
elif data != None:
data_el.appendChild(xml.createTextNode(str(data)))
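
The new _database_to_isoformat() helper exists because datetime.isoformat() includes microseconds and no timezone designator, while the strftime form emits an xs:dateTime string with a trailing 'Z'. A quick comparison, assuming an arbitrary fixed timestamp:

import datetime

ts = datetime.datetime(2011, 2, 21, 13, 45, 7, 123456)
print ts.isoformat()                     # 2011-02-21T13:45:07.123456
print ts.strftime("%Y-%m-%dT%H:%M:%SZ")  # 2011-02-21T13:45:07Z
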
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 882cdcfc9..844ccbe5e 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -198,8 +198,9 @@ class CloudController(object):
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
- enabled_services = db.service_get_all(context)
- disabled_services = db.service_get_all(context, True)
+ ctxt = context.elevated()
+ enabled_services = db.service_get_all(ctxt)
+ disabled_services = db.service_get_all(ctxt, True)
available_zones = []
for zone in [service.availability_zone for service
in enabled_services]:
@@ -318,14 +319,19 @@ class CloudController(object):
def describe_security_groups(self, context, group_name=None, **kwargs):
self.compute_api.ensure_default_security_group(context)
- if context.is_admin:
+ if group_name:
+ groups = []
+ for name in group_name:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ name)
+ groups.append(group)
+ elif context.is_admin:
groups = db.security_group_get_all(context)
else:
groups = db.security_group_get_by_project(context,
context.project_id)
groups = [self._format_security_group(context, g) for g in groups]
- if not group_name is None:
- groups = [g for g in groups if g.name in group_name]
return {'securityGroupInfo':
list(sorted(groups,
@@ -529,8 +535,9 @@ class CloudController(object):
def get_ajax_console(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
- internal_id = ec2_id_to_id(ec2_id)
- return self.compute_api.get_ajax_console(context, internal_id)
+ instance_id = ec2_id_to_id(ec2_id)
+ return self.compute_api.get_ajax_console(context,
+ instance_id=instance_id)
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
@@ -669,7 +676,8 @@ class CloudController(object):
instances = []
for ec2_id in instance_id:
internal_id = ec2_id_to_id(ec2_id)
- instance = self.compute_api.get(context, internal_id)
+ instance = self.compute_api.get(context,
+ instance_id=internal_id)
instances.append(instance)
else:
instances = self.compute_api.get_all(context, **kwargs)
@@ -882,7 +890,6 @@ class CloudController(object):
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
try:
- image = self.image_service.show(context, image_id)
image = self._format_image(context,
self.image_service.show(context,
image_id))
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index d0b18eced..b1b38ed2d 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -81,6 +81,7 @@ class APIRouter(wsgi.Router):
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
server_members['reset_network'] = 'POST'
+ server_members['inject_network_info'] = 'POST'
mapper.resource("zone", "zones", controller=zones.Controller(),
collection={'detail': 'GET'})
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 473071738..6011e6115 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -19,7 +19,6 @@ import datetime
import hashlib
import json
import time
-import logging
import webob.exc
import webob.dec
@@ -27,6 +26,7 @@ import webob.dec
from nova import auth
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import manager
from nova import utils
@@ -104,11 +104,14 @@ class AuthMiddleware(wsgi.Middleware):
2 days ago.
"""
ctxt = context.get_admin_context()
- token = self.db.auth_get_token(ctxt, token_hash)
+ try:
+ token = self.db.auth_token_get(ctxt, token_hash)
+ except exception.NotFound:
+ return None
if token:
delta = datetime.datetime.now() - token.created_at
if delta.days >= 2:
- self.db.auth_destroy_token(ctxt, token)
+ self.db.auth_token_destroy(ctxt, token.token_hash)
else:
return self.auth.get_user(token.user_id)
return None
@@ -132,6 +135,6 @@ class AuthMiddleware(wsgi.Middleware):
token_dict['server_management_url'] = req.url
token_dict['storage_url'] = ''
token_dict['user_id'] = user.id
- token = self.db.auth_create_token(ctxt, token_dict)
+ token = self.db.auth_token_create(ctxt, token_dict)
return token, user
return None, None
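
The token lookup above now treats a missing token as None and still expires tokens created two or more days ago. The age check itself is simple; a sketch with plain datetimes, independent of the db layer:

import datetime


def token_expired(created_at, now=None):
    # Mirrors the middleware above: tokens two or more days old are
    # destroyed instead of being used to look up the user.
    now = now or datetime.datetime.now()
    return (now - created_at).days >= 2

print token_expired(datetime.datetime.now() - datetime.timedelta(days=3))   # True
print token_expired(datetime.datetime.now() - datetime.timedelta(hours=1))  # False
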
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index 197125d86..7abb5f884 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -15,7 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
import time
from webob import exc
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 9d56bc508..cf85a496f 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from webob import exc
from nova import compute
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index ce9601ecb..69273ad7b 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import hashlib
import json
import traceback
@@ -33,7 +34,6 @@ import nova.api.openstack
LOG = logging.getLogger('server')
-LOG.setLevel(logging.DEBUG)
FLAGS = flags.FLAGS
@@ -51,7 +51,8 @@ def _translate_detail_keys(inst):
power_state.PAUSED: 'paused',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
- power_state.CRASHED: 'error'}
+ power_state.CRASHED: 'error',
+ power_state.FAILED: 'error'}
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
@@ -64,22 +65,22 @@ def _translate_detail_keys(inst):
inst_dict['addresses'] = dict(public=[], private=[])
# grab single private fixed ip
- try:
- private_ip = inst['fixed_ip']['address']
- if private_ip:
- inst_dict['addresses']['private'].append(private_ip)
- except KeyError:
- LOG.debug(_("Failed to read private ip"))
+ private_ips = utils.get_from_path(inst, 'fixed_ip/address')
+ inst_dict['addresses']['private'] = private_ips
# grab all public floating ips
- try:
- for floating in inst['fixed_ip']['floating_ips']:
- inst_dict['addresses']['public'].append(floating['address'])
- except KeyError:
- LOG.debug(_("Failed to read public ip(s)"))
+ public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+ inst_dict['addresses']['public'] = public_ips
+
+ # Return the metadata as a dictionary
+ metadata = {}
+ for item in inst['metadata']:
+ metadata[item['key']] = item['value']
+ inst_dict['metadata'] = metadata
- inst_dict['metadata'] = {}
inst_dict['hostId'] = ''
+ if inst['host']:
+ inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest()
return dict(server=inst_dict)
@@ -138,42 +139,35 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def _get_kernel_ramdisk_from_image(self, req, image_id):
- """
- Machine images are associated with Kernels and Ramdisk images via
- metadata stored in Glance as 'image_properties'
- """
- def lookup(param):
- _image_id = image_id
- try:
- return image['properties'][param]
- except KeyError:
- raise exception.NotFound(
- _("%(param)s property not found for image %(_image_id)s") %
- locals())
-
- image_id = str(image_id)
- image = self._image_service.show(req.environ['nova.context'], image_id)
- return lookup('kernel_id'), lookup('ramdisk_id')
-
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
- key_pairs = auth_manager.AuthManager.get_key_pairs(
- req.environ['nova.context'])
+ context = req.environ['nova.context']
+ key_pairs = auth_manager.AuthManager.get_key_pairs(context)
if not key_pairs:
raise exception.NotFound(_("No keypairs defined"))
key_pair = key_pairs[0]
image_id = common.get_image_id_from_image_hash(self._image_service,
- req.environ['nova.context'], env['server']['imageId'])
+ context, env['server']['imageId'])
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
req, image_id)
+
+ # Metadata is a list, not a Dictionary, because we allow duplicate keys
+ # (even though JSON can't encode this)
+ # In future, we may not allow duplicate keys.
+ # However, the CloudServers API is not definitive on this front,
+ # and we want to be compatible.
+ metadata = []
+ if env['server'].get('metadata'):
+ for k, v in env['server']['metadata'].items():
+ metadata.append({'key': k, 'value': v})
+
instances = self.compute_api.create(
- req.environ['nova.context'],
+ context,
instance_types.get_by_flavor_id(env['server']['flavorId']),
image_id,
kernel_id=kernel_id,
@@ -182,6 +176,7 @@ class Controller(wsgi.Controller):
display_description=env['server']['name'],
key_name=key_pair['name'],
key_data=key_pair['public_key'],
+ metadata=metadata,
onset_files=env.get('onset_files', []))
return _translate_keys(instances[0])
@@ -282,6 +277,20 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def inject_network_info(self, req, id):
+ """
+ Inject network info for an instance (admin only).
+
+ """
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.inject_network_info(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("Compute.api::inject_network_info %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
def pause(self, req, id):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
@@ -353,3 +362,37 @@ class Controller(wsgi.Controller):
action=item.action,
error=item.error))
return dict(actions=actions)
+
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+        """Retrieves kernel and ramdisk IDs from Glance.
+
+        Only the 'machine' (ami) image type uses a kernel and ramdisk outside
+        of the image.
+ """
+ # FIXME(sirp): Since we're retrieving the kernel_id from an
+ # image_property, this means only Glance is supported.
+ # The BaseImageService needs to expose a consistent way of accessing
+ # kernel_id and ramdisk_id
+ image = self._image_service.show(req.environ['nova.context'], image_id)
+
+ if image['status'] != 'active':
+ raise exception.Invalid(
+ _("Cannot build from image %(image_id)s, status not active") %
+ locals())
+
+ if image['type'] != 'machine':
+ return None, None
+
+ try:
+ kernel_id = image['properties']['kernel_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Kernel not found for image %(image_id)s") % locals())
+
+ try:
+ ramdisk_id = image['properties']['ramdisk_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Ramdisk not found for image %(image_id)s") % locals())
+
+ return kernel_id, ramdisk_id
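
Two of the server-view changes above, sketched in isolation: metadata is now carried as a list of key/value rows (so duplicate keys stay representable), and hostId becomes a SHA-224 hash of the compute host rather than an empty string:

import hashlib


def to_metadata_list(metadata_dict):
    # Same shape the create() handler above builds before calling compute_api.
    return [{'key': k, 'value': v} for k, v in metadata_dict.items()]


def host_id(host):
    # As in _translate_detail_keys(): empty when the instance has no host.
    return hashlib.sha224(host).hexdigest() if host else ''

print to_metadata_list({'role': 'webhead'})
print host_id('compute-01')
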
diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py
index bd3cc23a8..5d78f9377 100644
--- a/nova/api/openstack/shared_ip_groups.py
+++ b/nova/api/openstack/shared_ip_groups.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from webob import exc
from nova import wsgi
diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py
index 830464ffd..d5206da20 100644
--- a/nova/api/openstack/zones.py
+++ b/nova/api/openstack/zones.py
@@ -14,7 +14,6 @@
# under the License.
import common
-import logging
from nova import flags
from nova import wsgi
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 81ea6dc53..625778b66 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -85,7 +85,7 @@ class API(base.Base):
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
- availability_zone=None, user_data=None,
+ availability_zone=None, user_data=None, metadata=[],
onset_files=None):
"""Create the number of instances requested if quota and
other arguments check out ok.
@@ -100,11 +100,36 @@ class API(base.Base):
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
+ num_metadata = len(metadata)
+ quota_metadata = quota.allowed_metadata_items(context, num_metadata)
+ if quota_metadata < num_metadata:
+ pid = context.project_id
+            msg = (_("Quota exceeded for %(pid)s,"
+ " tried to set %(num_metadata)s metadata properties")
+ % locals())
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
+ # Because metadata is stored in the DB, we hard-code the size limits
+ # In future, we may support more variable length strings, so we act
+ # as if this is quota-controlled for forwards compatibility
+ for metadata_item in metadata:
+ k = metadata_item['key']
+ v = metadata_item['value']
+ if len(k) > 255 or len(v) > 255:
+ pid = context.project_id
+                msg = (_("Quota exceeded for %(pid)s,"
+ " metadata property key or value too long")
+ % locals())
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
image = self.image_service.show(context, image_id)
if kernel_id is None:
kernel_id = image.get('kernel_id', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdisk_id', None)
+ # FIXME(sirp): is there a way we can remove null_kernel?
# No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
@@ -154,6 +179,7 @@ class API(base.Base):
'key_name': key_name,
'key_data': key_data,
'locked': False,
+ 'metadata': metadata,
'availability_zone': availability_zone}
elevated = context.elevated()
instances = []
@@ -447,7 +473,7 @@ class API(base.Base):
{'method': 'authorize_ajax_console',
'args': {'token': output['token'], 'host': output['host'],
'port': output['port']}})
- return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
+ return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url,
output['token'])}
def get_console_output(self, context, instance_id):
@@ -476,6 +502,13 @@ class API(base.Base):
"""
self._cast_compute_message('reset_network', context, instance_id)
+ def inject_network_info(self, context, instance_id):
+ """
+ Inject network info for the instance.
+
+ """
+ self._cast_compute_message('inject_network_info', context, instance_id)
+
def attach_volume(self, context, instance_id, volume_id, device):
if not re.match("^/dev/[a-z]d[a-z]+$", device):
raise exception.ApiError(_("Invalid device specified: %s. "
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 309313fd0..7a2a5baa3 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -45,6 +45,6 @@ def get_by_type(instance_type):
def get_by_flavor_id(flavor_id):
for instance_type, details in INSTANCE_TYPES.iteritems():
- if details['flavorid'] == flavor_id:
+ if details['flavorid'] == int(flavor_id):
return instance_type
return FLAGS.default_instance_type
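
The one-character fix to get_by_flavor_id() matters because flavor ids arrive from the OpenStack API as strings, so the lookup must compare against int(flavor_id). A small illustration with a made-up type table:

INSTANCE_TYPES = {'m1.tiny': {'flavorid': 1}, 'm1.small': {'flavorid': 2}}


def get_by_flavor_id(flavor_id):
    for instance_type, details in INSTANCE_TYPES.iteritems():
        if details['flavorid'] == int(flavor_id):
            return instance_type
    return 'm1.tiny'  # stands in for FLAGS.default_instance_type

print get_by_flavor_id('2')  # m1.small; would fall through without int()
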
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index b8d4b7ee9..d659712ad 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -527,6 +527,18 @@ class ComputeManager(manager.Manager):
context=context)
self.driver.reset_network(instance_ref)
+ @checks_instance_lock
+ def inject_network_info(self, context, instance_id):
+ """
+ Inject network info for the instance.
+
+ """
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ LOG.debug(_('instance %s: inject network info'), instance_id,
+ context=context)
+ self.driver.inject_network_info(instance_ref)
+
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 5697e7cb1..57c75cf4f 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -20,11 +20,11 @@ Console Proxy Service
"""
import functools
-import logging
import socket
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index ee66dac46..cd257e0a6 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -20,7 +20,6 @@ XVP (Xenserver VNC Proxy) driver.
"""
import fcntl
-import logging
import os
import signal
import subprocess
@@ -31,6 +30,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
flags.DEFINE_string('console_xvp_conf_template',
diff --git a/nova/db/api.py b/nova/db/api.py
index d7f3746d2..dcaf55e8f 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -85,8 +85,8 @@ def service_get(context, service_id):
def service_get_all(context, disabled=False):
- """Get all service."""
- return IMPL.service_get_all(context, None, disabled)
+ """Get all services."""
+ return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
@@ -630,19 +630,24 @@ def iscsi_target_create_safe(context, values):
###############
-def auth_destroy_token(context, token):
+def auth_token_destroy(context, token_id):
"""Destroy an auth token."""
- return IMPL.auth_destroy_token(context, token)
+ return IMPL.auth_token_destroy(context, token_id)
-def auth_get_token(context, token_hash):
+def auth_token_get(context, token_hash):
"""Retrieves a token given the hash representing it."""
- return IMPL.auth_get_token(context, token_hash)
+ return IMPL.auth_token_get(context, token_hash)
-def auth_create_token(context, token):
+def auth_token_update(context, token_hash, values):
+ """Updates a token given the hash representing it."""
+ return IMPL.auth_token_update(context, token_hash, values)
+
+
+def auth_token_create(context, token):
"""Creates a new token."""
- return IMPL.auth_create_token(context, token)
+ return IMPL.auth_token_create(context, token)
###################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 2697fac73..6df2a8843 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -136,15 +136,12 @@ def service_get(context, service_id, session=None):
@require_admin_context
-def service_get_all(context, session=None, disabled=False):
- if not session:
- session = get_session()
-
- result = session.query(models.Service).\
+def service_get_all(context, disabled=False):
+ session = get_session()
+ return session.query(models.Service).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(disabled=disabled).\
all()
- return result
@require_admin_context
@@ -715,6 +712,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -723,6 +721,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
+ options(joinedload('metadata')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
filter_by(deleted=False).\
@@ -1046,7 +1045,8 @@ def network_create_safe(context, values):
@require_admin_context
def network_disassociate(context, network_id):
- network_update(context, network_id, {'project_id': None})
+ network_update(context, network_id, {'project_id': None,
+ 'host': None})
@require_admin_context
@@ -1262,16 +1262,20 @@ def iscsi_target_create_safe(context, values):
@require_admin_context
-def auth_destroy_token(_context, token):
+def auth_token_destroy(context, token_id):
session = get_session()
- session.delete(token)
+ with session.begin():
+ token_ref = auth_token_get(context, token_id, session=session)
+ token_ref.delete(session=session)
@require_admin_context
-def auth_get_token(_context, token_hash):
- session = get_session()
+def auth_token_get(context, token_hash, session=None):
+ if session is None:
+ session = get_session()
tk = session.query(models.AuthToken).\
filter_by(token_hash=token_hash).\
+ filter_by(deleted=can_read_deleted(context)).\
first()
if not tk:
raise exception.NotFound(_('Token %s does not exist') % token_hash)
@@ -1279,7 +1283,16 @@ def auth_get_token(_context, token_hash):
@require_admin_context
-def auth_create_token(_context, token):
+def auth_token_update(context, token_hash, values):
+ session = get_session()
+ with session.begin():
+ token_ref = auth_token_get(context, token_hash, session=session)
+ token_ref.update(values)
+ token_ref.save(session=session)
+
+
+@require_admin_context
+def auth_token_create(_context, token):
tk = models.AuthToken()
tk.update(token)
tk.save()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
new file mode 100644
index 000000000..4cb07e0d8
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+quotas = Table('quotas', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+
+instance_metadata_table = Table('instance_metadata', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+#
+# New columns
+#
+quota_metadata_items = Column('metadata_items', Integer())
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (instance_metadata_table, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+ quotas.create_column(quota_metadata_items)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
new file mode 100644
index 000000000..705fc8ff3
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+# None
+
+#
+# Tables to alter
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+
+volumes_provider_location = Column('provider_location',
+ String(length=256,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+volumes_provider_auth = Column('provider_auth',
+ String(length=256,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Add columns to existing tables
+ volumes.create_column(volumes_provider_location)
+ volumes.create_column(volumes_provider_auth)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 40a96fc17..1882efeba 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -243,6 +243,9 @@ class Volume(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+ provider_location = Column(String(255))
+ provider_auth = Column(String(255))
+
class Quota(BASE, NovaBase):
"""Represents quota overrides for a project."""
@@ -256,6 +259,7 @@ class Quota(BASE, NovaBase):
volumes = Column(Integer)
gigabytes = Column(Integer)
floating_ips = Column(Integer)
+ metadata_items = Column(Integer)
class ExportDevice(BASE, NovaBase):
@@ -536,6 +540,20 @@ class Console(BASE, NovaBase):
pool = relationship(ConsolePool, backref=backref('consoles'))
+class InstanceMetadata(BASE, NovaBase):
+ """Represents a metadata key/value pair for an instance"""
+ __tablename__ = 'instance_metadata'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+ instance = relationship(Instance, backref="metadata",
+ foreign_keys=instance_id,
+ primaryjoin='and_('
+ 'InstanceMetadata.instance_id == Instance.id,'
+ 'InstanceMetadata.deleted == False)')
+
+
class Zone(BASE, NovaBase):
"""Represents a child zone of this zone."""
__tablename__ = 'zones'
@@ -557,7 +575,8 @@ def register_models():
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
- Project, Certificate, ConsolePool, Console, Zone)
+ Project, Certificate, ConsolePool, Console, Zone,
+ InstanceMetadata)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/flags.py b/nova/flags.py
index f64a62da9..8cf199b2f 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -160,9 +160,45 @@ class StrWrapper(object):
raise KeyError(name)
-FLAGS = FlagValues()
-gflags.FLAGS = FLAGS
-gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
+# Copied from gflags with small mods to get the naming correct.
+# Originally gflags checks for the first module that is not gflags that is
+# in the call chain, we want to check for the first module that is not gflags
+# and not this module.
+def _GetCallingModule():
+ """Returns the name of the module that's calling into this module.
+
+ We generally use this function to get the name of the module calling a
+ DEFINE_foo... function.
+ """
+ # Walk down the stack to find the first globals dict that's not ours.
+ for depth in range(1, sys.getrecursionlimit()):
+ if not sys._getframe(depth).f_globals is globals():
+ module_name = __GetModuleName(sys._getframe(depth).f_globals)
+ if module_name == 'gflags':
+ continue
+ if module_name is not None:
+ return module_name
+ raise AssertionError("No module was found")
+
+
+# Copied from gflags because it is a private function
+def __GetModuleName(globals_dict):
+ """Given a globals dict, returns the name of the module that defines it.
+
+ Args:
+ globals_dict: A dictionary that should correspond to an environment
+ providing the values of the globals.
+
+ Returns:
+ A string (the name of the module) or None (if the module could not
+        be identified).
+ """
+ for name, module in sys.modules.iteritems():
+ if getattr(module, '__dict__', None) is globals_dict:
+ if name == '__main__':
+ return sys.argv[0]
+ return name
+ return None
def _wrapper(func):
@@ -173,6 +209,11 @@ def _wrapper(func):
return _wrapped
+FLAGS = FlagValues()
+gflags.FLAGS = FLAGS
+gflags._GetCallingModule = _GetCallingModule
+
+
DEFINE = _wrapper(gflags.DEFINE)
DEFINE_string = _wrapper(gflags.DEFINE_string)
DEFINE_integer = _wrapper(gflags.DEFINE_integer)
@@ -185,8 +226,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
DEFINE_flag = _wrapper(gflags.DEFINE_flag)
-
-
HelpFlag = gflags.HelpFlag
HelpshortFlag = gflags.HelpshortFlag
HelpXMLFlag = gflags.HelpXMLFlag
@@ -285,8 +324,9 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
+DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite')
DEFINE_string('sql_connection',
- 'sqlite:///$state_path/nova.sqlite',
+ 'sqlite:///$state_path/$sqlite_db',
'connection string for sql database')
DEFINE_integer('sql_idle_timeout',
3600,
diff --git a/nova/log.py b/nova/log.py
index 6b201ffcc..87a21ddb4 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -54,7 +54,7 @@ flags.DEFINE_string('logging_default_format_string',
'format string to use for log messages without context')
flags.DEFINE_string('logging_debug_format_suffix',
- 'from %(processName)s (pid=%(process)d) %(funcName)s'
+ 'from (pid=%(process)d) %(funcName)s'
' %(pathname)s:%(lineno)d',
'data to append to log format when level is DEBUG')
@@ -65,6 +65,7 @@ flags.DEFINE_string('logging_exception_prefix',
flags.DEFINE_list('default_log_levels',
['amqplib=WARN',
'sqlalchemy=WARN',
+ 'boto=WARN',
'eventlet.wsgi.server=WARN'],
'list of logger=LEVEL pairs')
@@ -117,7 +118,7 @@ def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
-def get_log_file_path(binary=None):
+def _get_log_file_path(binary=None):
if FLAGS.logfile:
return FLAGS.logfile
if FLAGS.logdir:
@@ -125,25 +126,6 @@ def get_log_file_path(binary=None):
return '%s.log' % (os.path.join(FLAGS.logdir, binary),)
-def basicConfig():
- logging.basicConfig()
- for handler in logging.root.handlers:
- handler.setFormatter(_formatter)
- if FLAGS.verbose:
- logging.root.setLevel(logging.DEBUG)
- else:
- logging.root.setLevel(logging.INFO)
- if FLAGS.use_syslog:
- syslog = SysLogHandler(address='/dev/log')
- syslog.setFormatter(_formatter)
- logging.root.addHandler(syslog)
- logpath = get_log_file_path()
- if logpath:
- logfile = WatchedFileHandler(logpath)
- logfile.setFormatter(_formatter)
- logging.root.addHandler(logfile)
-
-
class NovaLogger(logging.Logger):
"""
NovaLogger manages request context and formatting.
@@ -151,23 +133,19 @@ class NovaLogger(logging.Logger):
This becomes the class that is instanciated by logging.getLogger.
"""
def __init__(self, name, level=NOTSET):
- level_name = self._get_level_from_flags(name, FLAGS)
- level = globals()[level_name]
logging.Logger.__init__(self, name, level)
+ self.setup_from_flags()
- def _get_level_from_flags(self, name, FLAGS):
- # if exactly "nova", or a child logger, honor the verbose flag
- if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
- return 'DEBUG'
+ def setup_from_flags(self):
+ """Setup logger from flags"""
+ level = NOTSET
for pair in FLAGS.default_log_levels:
- logger, _sep, level = pair.partition('=')
+ logger, _sep, level_name = pair.partition('=')
# NOTE(todd): if we set a.b, we want a.b.c to have the same level
# (but not a.bc, so we check the dot)
- if name == logger:
- return level
- if name.startswith(logger) and name[len(logger)] == '.':
- return level
- return 'INFO'
+ if self.name == logger or self.name.startswith("%s." % logger):
+ level = globals()[level_name]
+ self.setLevel(level)
def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
"""Extract context from any log call"""
@@ -176,12 +154,12 @@ class NovaLogger(logging.Logger):
if context:
extra.update(_dictify_context(context))
extra.update({"nova_version": version.version_string_with_vcs()})
- logging.Logger._log(self, level, msg, args, exc_info, extra)
+ return logging.Logger._log(self, level, msg, args, exc_info, extra)
def addHandler(self, handler):
"""Each handler gets our custom formatter"""
handler.setFormatter(_formatter)
- logging.Logger.addHandler(self, handler)
+ return logging.Logger.addHandler(self, handler)
def audit(self, msg, *args, **kwargs):
"""Shortcut for our AUDIT level"""
@@ -208,23 +186,6 @@ class NovaLogger(logging.Logger):
self.error(message, **kwargs)
-def handle_exception(type, value, tb):
- logging.root.critical(str(value), exc_info=(type, value, tb))
-
-
-sys.excepthook = handle_exception
-logging.setLoggerClass(NovaLogger)
-
-
-class NovaRootLogger(NovaLogger):
- pass
-
-if not isinstance(logging.root, NovaRootLogger):
- logging.root = NovaRootLogger("nova.root", WARNING)
- NovaLogger.root = logging.root
- NovaLogger.manager.root = logging.root
-
-
class NovaFormatter(logging.Formatter):
"""
A nova.context.RequestContext aware formatter configured through flags.
@@ -271,8 +232,73 @@ class NovaFormatter(logging.Formatter):
_formatter = NovaFormatter()
+class NovaRootLogger(NovaLogger):
+ def __init__(self, name, level=NOTSET):
+ self.logpath = None
+ self.filelog = None
+ self.streamlog = StreamHandler()
+ self.syslog = None
+ NovaLogger.__init__(self, name, level)
+
+ def setup_from_flags(self):
+ """Setup logger from flags"""
+ global _filelog
+ if FLAGS.use_syslog:
+ self.syslog = SysLogHandler(address='/dev/log')
+ self.addHandler(self.syslog)
+ elif self.syslog:
+ self.removeHandler(self.syslog)
+ logpath = _get_log_file_path()
+ if logpath:
+ self.removeHandler(self.streamlog)
+ if logpath != self.logpath:
+ self.removeHandler(self.filelog)
+ self.filelog = WatchedFileHandler(logpath)
+ self.addHandler(self.filelog)
+ self.logpath = logpath
+ else:
+ self.removeHandler(self.filelog)
+ self.addHandler(self.streamlog)
+ if FLAGS.verbose:
+ self.setLevel(DEBUG)
+ else:
+ self.setLevel(INFO)
+
+
+def handle_exception(type, value, tb):
+ logging.root.critical(str(value), exc_info=(type, value, tb))
+
+
+def reset():
+ """Resets logging handlers. Should be called if FLAGS changes."""
+ for logger in NovaLogger.manager.loggerDict.itervalues():
+ if isinstance(logger, NovaLogger):
+ logger.setup_from_flags()
+
+
+def setup():
+ """Setup nova logging."""
+ if not isinstance(logging.root, NovaRootLogger):
+ logging._acquireLock()
+ for handler in logging.root.handlers:
+ logging.root.removeHandler(handler)
+ logging.root = NovaRootLogger("nova")
+ NovaLogger.root = logging.root
+ NovaLogger.manager.root = logging.root
+ for logger in NovaLogger.manager.loggerDict.itervalues():
+ logger.root = logging.root
+ if isinstance(logger, logging.Logger):
+ NovaLogger.manager._fixupParents(logger)
+ NovaLogger.manager.loggerDict["nova"] = logging.root
+ logging._releaseLock()
+ sys.excepthook = handle_exception
+ reset()
+
+
+root = logging.root
+logging.setLoggerClass(NovaLogger)
+
+
def audit(msg, *args, **kwargs):
"""Shortcut for logging to root log with sevrity 'AUDIT'."""
- if len(logging.root.handlers) == 0:
- basicConfig()
logging.root.log(AUDIT, msg, *args, **kwargs)
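
With basicConfig() gone, the module is driven through setup() and reset(): setup() swaps the stdlib root logger for a NovaRootLogger and installs the excepthook, while reset() re-reads the flags on every NovaLogger. A hedged usage sketch; where the entry points actually call setup() is not shown in this diff, and 'nova.example' is a made-up logger name:

from nova import flags
from nova import log as logging

FLAGS = flags.FLAGS

logging.setup()                          # install NovaRootLogger + excepthook
LOG = logging.getLogger('nova.example')  # hypothetical logger name
LOG.audit('service starting')

FLAGS.verbose = True                     # e.g. after parsing new flags
logging.reset()                          # re-apply levels/handlers from flags
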
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 7bdd56d7b..b36dd59cf 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -163,11 +163,22 @@ class NetworkManager(manager.Manager):
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool."""
- raise NotImplementedError()
+ # TODO(vish): when this is called by compute, we can associate compute
+ # with a network, or a cluster of computes with a network
+ # and use that network here with a method like
+ # network_get_by_compute_host
+ network_ref = self.db.network_get_by_bridge(context,
+ FLAGS.flat_network_bridge)
+ address = self.db.fixed_ip_associate_pool(context.elevated(),
+ network_ref['id'],
+ instance_id)
+ self.db.fixed_ip_update(context, address, {'allocated': True})
+ return address
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
"""Returns a fixed ip to the pool."""
- raise NotImplementedError()
+ self.db.fixed_ip_update(context, address, {'allocated': False})
+ self.db.fixed_ip_disassociate(context.elevated(), address)
def setup_fixed_ip(self, context, address):
"""Sets up rules for fixed ip."""
@@ -257,12 +268,58 @@ class NetworkManager(manager.Manager):
def get_network_host(self, context):
"""Get the network host for the current context."""
- raise NotImplementedError()
+ network_ref = self.db.network_get_by_bridge(context,
+ FLAGS.flat_network_bridge)
+ # NOTE(vish): If the network has no host, use the network_host flag.
+            #             This could eventually be a db lookup of some sort, but
+ # a flag is easy to handle for now.
+ host = network_ref['host']
+ if not host:
+ topic = self.db.queue_get_for(context,
+ FLAGS.network_topic,
+ FLAGS.network_host)
+ if FLAGS.fake_call:
+ return self.set_network_host(context, network_ref['id'])
+ host = rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"network_id": network_ref['id']}})
+ return host
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, *args, **kwargs):
+ cidr_v6, label, *args, **kwargs):
"""Create networks based on parameters."""
- raise NotImplementedError()
+ fixed_net = IPy.IP(cidr)
+ fixed_net_v6 = IPy.IP(cidr_v6)
+ significant_bits_v6 = 64
+ count = 1
+ for index in range(num_networks):
+ start = index * network_size
+ significant_bits = 32 - int(math.log(network_size, 2))
+ cidr = "%s/%s" % (fixed_net[start], significant_bits)
+ project_net = IPy.IP(cidr)
+ net = {}
+ net['bridge'] = FLAGS.flat_network_bridge
+ net['dns'] = FLAGS.flat_network_dns
+ net['cidr'] = cidr
+ net['netmask'] = str(project_net.netmask())
+ net['gateway'] = str(project_net[1])
+ net['broadcast'] = str(project_net.broadcast())
+ net['dhcp_start'] = str(project_net[2])
+ if num_networks > 1:
+ net['label'] = "%s_%d" % (label, count)
+ else:
+ net['label'] = label
+ count += 1
+
+ if(FLAGS.use_ipv6):
+ cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
+ net['cidr_v6'] = cidr_v6
+
+ network_ref = self.db.network_create_safe(context, net)
+
+ if network_ref:
+ self._create_fixed_ips(context, network_ref['id'])
@property
def _bottom_reserved_ips(self): # pylint: disable-msg=R0201
@@ -322,83 +379,20 @@ class FlatManager(NetworkManager):
"""
timeout_fixed_ips = False
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool."""
- # TODO(vish): when this is called by compute, we can associate compute
- # with a network, or a cluster of computes with a network
- # and use that network here with a method like
- # network_get_by_compute_host
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- address = self.db.fixed_ip_associate_pool(context.elevated(),
- network_ref['id'],
- instance_id)
- self.db.fixed_ip_update(context, address, {'allocated': True})
- return address
-
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool."""
- self.db.fixed_ip_update(context, address, {'allocated': False})
- self.db.fixed_ip_disassociate(context.elevated(), address)
+ def init_host(self):
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+        # Fix for bug 723298 - do not call init_host on the superclass.
+        # The following code has been copied from NetworkManager.init_host.
+ ctxt = context.get_admin_context()
+ for network in self.db.host_get_networks(ctxt, self.host):
+ self._on_set_network_host(ctxt, network['id'])
def setup_compute_network(self, context, instance_id):
"""Network is created manually."""
pass
- def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, label, *args, **kwargs):
- """Create networks based on parameters."""
- fixed_net = IPy.IP(cidr)
- fixed_net_v6 = IPy.IP(cidr_v6)
- significant_bits_v6 = 64
- count = 1
- for index in range(num_networks):
- start = index * network_size
- significant_bits = 32 - int(math.log(network_size, 2))
- cidr = "%s/%s" % (fixed_net[start], significant_bits)
- project_net = IPy.IP(cidr)
- net = {}
- net['bridge'] = FLAGS.flat_network_bridge
- net['cidr'] = cidr
- net['netmask'] = str(project_net.netmask())
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast())
- net['dhcp_start'] = str(project_net[2])
- if num_networks > 1:
- net['label'] = "%s_%d" % (label, count)
- else:
- net['label'] = label
- count += 1
-
- if(FLAGS.use_ipv6):
- cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
- net['cidr_v6'] = cidr_v6
-
- network_ref = self.db.network_create_safe(context, net)
-
- if network_ref:
- self._create_fixed_ips(context, network_ref['id'])
-
- def get_network_host(self, context):
- """Get the network host for the current context."""
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- # NOTE(vish): If the network has no host, use the network_host flag.
- # This could eventually be a a db lookup of some sort, but
- # a flag is easy to handle for now.
- host = network_ref['host']
- if not host:
- topic = self.db.queue_get_for(context,
- FLAGS.network_topic,
- FLAGS.network_host)
- if FLAGS.fake_call:
- return self.set_network_host(context, network_ref['id'])
- host = rpc.call(context,
- FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
- return host
-
def _on_set_network_host(self, context, network_id):
"""Called when this host becomes the host for a network."""
net = {}
@@ -406,8 +400,24 @@ class FlatManager(NetworkManager):
net['dns'] = FLAGS.flat_network_dns
self.db.network_update(context, network_id, net)
+ def allocate_floating_ip(self, context, project_id):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+ def associate_floating_ip(self, context, floating_address, fixed_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+ def disassociate_floating_ip(self, context, floating_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+ def deallocate_floating_ip(self, context, floating_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
-class FlatDHCPManager(FlatManager):
+class FlatDHCPManager(NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
@@ -522,9 +532,9 @@ class VlanManager(NetworkManager):
fixed_net = IPy.IP(cidr)
if fixed_net.len() < num_networks * network_size:
- raise ValueError(_('The network range is not big enough to fit %s'
- ' networks of size %s' %
- (num_networks, network_size)))
+ raise ValueError(_('The network range is not big enough to fit '
+                '%(num_networks)s networks. Network size is %(network_size)s' %
+ locals()))
fixed_net_v6 = IPy.IP(cidr_v6)
network_size_v6 = 1 << 64
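
The subnetting arithmetic that create_networks() now performs on the base NetworkManager can be checked in isolation. IPy is the same dependency the manager already uses; the CIDR and sizes below are only an example:

import math

import IPy


def subnet_cidrs(cidr, num_networks, network_size):
    # Same math as create_networks(): each network is a /N slice where
    # N = 32 - log2(network_size), carved sequentially from the fixed range.
    fixed_net = IPy.IP(cidr)
    significant_bits = 32 - int(math.log(network_size, 2))
    for index in range(num_networks):
        start = index * network_size
        yield "%s/%s" % (fixed_net[start], significant_bits)

print list(subnet_cidrs('10.0.0.0/16', num_networks=4, network_size=256))
# ['10.0.0.0/24', '10.0.1.0/24', '10.0.2.0/24', '10.0.3.0/24']
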
diff --git a/nova/quota.py b/nova/quota.py
index 3884eb308..6b52a97fa 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -35,6 +35,8 @@ flags.DEFINE_integer('quota_gigabytes', 1000,
'number of volume gigabytes allowed per project')
flags.DEFINE_integer('quota_floating_ips', 10,
'number of floating ips allowed per project')
+flags.DEFINE_integer('quota_metadata_items', 128,
+ 'number of metadata items allowed per instance')
def get_quota(context, project_id):
@@ -42,7 +44,8 @@ def get_quota(context, project_id):
'cores': FLAGS.quota_cores,
'volumes': FLAGS.quota_volumes,
'gigabytes': FLAGS.quota_gigabytes,
- 'floating_ips': FLAGS.quota_floating_ips}
+ 'floating_ips': FLAGS.quota_floating_ips,
+ 'metadata_items': FLAGS.quota_metadata_items}
try:
quota = db.quota_get(context, project_id)
for key in rval.keys():
@@ -94,6 +97,15 @@ def allowed_floating_ips(context, num_floating_ips):
return min(num_floating_ips, allowed_floating_ips)
+def allowed_metadata_items(context, num_metadata_items):
+ """Check quota; return min(num_metadata_items,allowed_metadata_items)"""
+ project_id = context.project_id
+ context = context.elevated()
+ quota = get_quota(context, project_id)
+ num_allowed_metadata_items = quota['metadata_items']
+ return min(num_metadata_items, num_allowed_metadata_items)
+
+
class QuotaError(exception.ApiError):
"""Quota Exceeeded"""
pass
diff --git a/nova/rpc.py b/nova/rpc.py
index 205bb524a..8fe4565dd 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -91,18 +91,19 @@ class Consumer(messaging.Consumer):
super(Consumer, self).__init__(*args, **kwargs)
self.failed_connection = False
break
- except: # Catching all because carrot sucks
+ except Exception as e: # Catching all because carrot sucks
fl_host = FLAGS.rabbit_host
fl_port = FLAGS.rabbit_port
fl_intv = FLAGS.rabbit_retry_interval
- LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is"
- " unreachable. Trying again in %(fl_intv)d seconds.")
+ LOG.error(_("AMQP server on %(fl_host)s:%(fl_port)d is"
+ " unreachable: %(e)s. Trying again in %(fl_intv)d"
+ " seconds.")
% locals())
self.failed_connection = True
if self.failed_connection:
- LOG.exception(_("Unable to connect to AMQP server "
- "after %d tries. Shutting down."),
- FLAGS.rabbit_max_retries)
+ LOG.error(_("Unable to connect to AMQP server "
+ "after %d tries. Shutting down."),
+ FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
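
The Consumer change above folds the caught exception into an error-level message instead of logging a full traceback on every retry; the surrounding logic is a bounded retry loop that waits rabbit_retry_interval between attempts and gives up after rabbit_max_retries. That loop, condensed into a standalone sketch with a caller-supplied connect callable in place of carrot:

    import logging
    import sys
    import time

    LOG = logging.getLogger('amqp-retry')


    def connect_with_retries(connect, host, port, retries=3, interval=1):
        """Call connect(); log each failure and give up after `retries` attempts."""
        for _ in range(retries):
            try:
                return connect()
            except Exception as e:
                LOG.error('AMQP server on %s:%d is unreachable: %s. '
                          'Trying again in %d seconds.', host, port, e, interval)
                time.sleep(interval)
        LOG.error('Unable to connect to AMQP server after %d tries. '
                  'Shutting down.', retries)
        sys.exit(1)
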
diff --git a/nova/service.py b/nova/service.py
index 59648adf2..f47358089 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -45,19 +45,10 @@ FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to datastore',
lower_bound=1)
-
flags.DEFINE_integer('periodic_interval', 60,
'seconds between running periodic tasks',
lower_bound=1)
-flags.DEFINE_string('pidfile', None,
- 'pidfile to use for this service')
-
-
-flags.DEFINE_flag(flags.HelpFlag())
-flags.DEFINE_flag(flags.HelpshortFlag())
-flags.DEFINE_flag(flags.HelpXMLFlag())
-
class Service(object):
"""Base class for workers that run on hosts."""
@@ -68,6 +59,8 @@ class Service(object):
self.binary = binary
self.topic = topic
self.manager_class_name = manager
+ manager_class = utils.import_class(self.manager_class_name)
+ self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
super(Service, self).__init__(*args, **kwargs)
@@ -75,9 +68,9 @@ class Service(object):
self.timers = []
def start(self):
- manager_class = utils.import_class(self.manager_class_name)
- self.manager = manager_class(host=self.host, *self.saved_args,
- **self.saved_kwargs)
+ vcs_string = version.version_string_with_vcs()
+ logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"),
+ {'topic': self.topic, 'vcs_string': vcs_string})
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
@@ -157,9 +150,6 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- vcs_string = version.version_string_with_vcs()
- logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)")
- % locals())
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -181,6 +171,13 @@ class Service(object):
pass
self.timers = []
+ def wait(self):
+ for x in self.timers:
+ try:
+ x.wait()
+ except Exception:
+ pass
+
def periodic_tasks(self):
"""Tasks to be run at a periodic interval"""
self.manager.periodic_tasks(context.get_admin_context())
@@ -214,11 +211,19 @@ class Service(object):
def serve(*services):
- FLAGS(sys.argv)
- logging.basicConfig()
-
- if not services:
- services = [Service.create()]
+ try:
+ if not services:
+ services = [Service.create()]
+ except Exception:
+ logging.exception('in Service.create()')
+ raise
+ finally:
+ # After we've loaded up all our dynamic bits, check
+ # whether we should print help
+ flags.DEFINE_flag(flags.HelpFlag())
+ flags.DEFINE_flag(flags.HelpshortFlag())
+ flags.DEFINE_flag(flags.HelpXMLFlag())
+ FLAGS.ParseNewFlags()
name = '_'.join(x.binary for x in services)
logging.debug(_("Serving %s"), name)
diff --git a/nova/test.py b/nova/test.py
index a12cf9d32..d8a47464f 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -22,10 +22,15 @@ Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
+
import datetime
+import os
+import shutil
+import uuid
import unittest
import mox
import stubout
from nova import context
@@ -33,13 +38,12 @@ from nova import db
from nova import fakerabbit
from nova import flags
from nova import rpc
-from nova.network import manager as network_manager
-from nova.tests import fake_flags
+from nova import service
FLAGS = flags.FLAGS
-flags.DEFINE_bool('flush_db', True,
- 'Flush the database before running fake tests')
+flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite',
+ 'File name of clean sqlite db')
flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
@@ -64,15 +68,8 @@ class TestCase(unittest.TestCase):
# now that we have some required db setup for the system
# to work properly.
self.start = datetime.datetime.utcnow()
- ctxt = context.get_admin_context()
- if db.network_count(ctxt) != 5:
- network_manager.VlanManager().create_networks(ctxt,
- FLAGS.fixed_range,
- 5, 16,
- FLAGS.fixed_range_v6,
- FLAGS.vlan_start,
- FLAGS.vpn_start,
- )
+ shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
+ os.path.join(FLAGS.state_path, FLAGS.sqlite_db))
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
@@ -80,6 +77,7 @@ class TestCase(unittest.TestCase):
self.stubs = stubout.StubOutForTesting()
self.flag_overrides = {}
self.injected = []
+ self._services = []
self._monkey_patch_attach()
self._original_flags = FLAGS.FlagValuesDict()
@@ -91,25 +89,31 @@ class TestCase(unittest.TestCase):
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
- # NOTE(vish): Clean up any ips associated during the test.
- ctxt = context.get_admin_context()
- db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
- self.start)
- db.network_disassociate_all(ctxt)
+ super(TestCase, self).tearDown()
+ finally:
+ # Clean out fake_rabbit's queue if we used it
+ if FLAGS.fake_rabbit:
+ fakerabbit.reset_all()
+
+ # Reset any overriden flags
+ self.reset_flags()
+
+ # Reset our monkey-patches
rpc.Consumer.attach_to_eventlet = self.originalAttach
+
+ # Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
- if FLAGS.fake_rabbit:
- fakerabbit.reset_all()
-
- db.security_group_destroy_all(ctxt)
- super(TestCase, self).tearDown()
- finally:
- self.reset_flags()
+ # Kill any services
+ for x in self._services:
+ try:
+ x.kill()
+ except Exception:
+ pass
def flags(self, **kw):
"""Override flag variables for a test"""
@@ -127,6 +131,15 @@ class TestCase(unittest.TestCase):
for k, v in self._original_flags.iteritems():
setattr(FLAGS, k, v)
+ def start_service(self, name, host=None, **kwargs):
+ host = host or uuid.uuid4().hex
+ kwargs.setdefault('host', host)
+ kwargs.setdefault('binary', 'nova-%s' % name)
+ svc = service.Service.create(**kwargs)
+ svc.start()
+ self._services.append(svc)
+ return svc
+
def _monkey_patch_attach(self):
self.originalAttach = rpc.Consumer.attach_to_eventlet
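
The new start_service helper gives tests a one-liner for bringing up a nova-* worker: pick a unique host unless the caller pins one, create and start the service, and remember it so tearDown can kill everything a test left running. The bookkeeping, sketched with a fake service class instead of nova.service.Service:

    import uuid


    class FakeService(object):
        """Minimal stand-in for nova.service.Service (illustrative only)."""

        def __init__(self, host, binary):
            self.host = host
            self.binary = binary

        def start(self):
            pass

        def kill(self):
            pass


    class ServiceTracker(object):
        """The shape of TestCase.start_service(): create, start, remember."""

        def __init__(self):
            self._services = []

        def start_service(self, name, host=None, **kwargs):
            # A random host keeps fixtures from colliding unless a test pins one.
            kwargs.setdefault('host', host or uuid.uuid4().hex)
            kwargs.setdefault('binary', 'nova-%s' % name)
            svc = FakeService(**kwargs)
            svc.start()
            self._services.append(svc)
            return svc

        def kill_services(self):
            """tearDown step: stop everything a test started, ignoring failures."""
            for svc in self._services:
                try:
                    svc.kill()
                except Exception:
                    pass


    tracker = ServiceTracker()
    compute = tracker.start_service('compute', host='host1')
    assert compute.binary == 'nova-compute'
    tracker.kill_services()
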
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 592d5bea9..7fba02a93 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -37,5 +37,30 @@ setattr(__builtin__, '_', lambda x: x)
def setup():
+ import os
+ import shutil
+
+ from nova import context
+ from nova import flags
from nova.db import migration
+ from nova.network import manager as network_manager
+ from nova.tests import fake_flags
+
+ FLAGS = flags.FLAGS
+
+ testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
+ if os.path.exists(testdb):
+ os.unlink(testdb)
migration.db_sync()
+ ctxt = context.get_admin_context()
+ network_manager.VlanManager().create_networks(ctxt,
+ FLAGS.fixed_range,
+ FLAGS.num_networks,
+ FLAGS.network_size,
+ FLAGS.fixed_range_v6,
+ FLAGS.vlan_start,
+ FLAGS.vpn_start,
+ )
+
+ cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
+ shutil.copyfile(testdb, cleandb)
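
The package-level setup() now builds the database exactly once per run: delete any stale sqlite file, run the migrations, create the fixture networks, and snapshot the result to sqlite_clean_db; TestCase.setUp (in the test.py hunk above) then restores that snapshot before every test instead of rebuilding anything. The file-copy choreography, sketched with a throwaway sqlite schema standing in for the real migrations:

    import os
    import shutil
    import sqlite3
    import tempfile

    state_path = tempfile.mkdtemp()
    test_db = os.path.join(state_path, 'tests.sqlite')
    clean_db = os.path.join(state_path, 'clean.sqlite')


    def package_setup():
        """Once per run: start from scratch, build the schema, snapshot it."""
        if os.path.exists(test_db):
            os.unlink(test_db)
        conn = sqlite3.connect(test_db)  # stands in for migration.db_sync()
        conn.execute('CREATE TABLE networks (id INTEGER PRIMARY KEY, cidr TEXT)')
        conn.execute("INSERT INTO networks (cidr) VALUES ('10.0.0.0/29')")
        conn.commit()
        conn.close()
        shutil.copyfile(test_db, clean_db)


    def per_test_setup():
        """Before every test: restore the pristine snapshot, not a rebuild."""
        shutil.copyfile(clean_db, test_db)


    package_setup()
    per_test_setup()
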
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index 77b1dd37f..e18120285 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -16,7 +16,7 @@
# under the License.
import webob.dec
-import unittest
+from nova import test
from nova import context
from nova import flags
@@ -33,7 +33,7 @@ def simple_wsgi(req):
return ""
-class RateLimitingMiddlewareTest(unittest.TestCase):
+class RateLimitingMiddlewareTest(test.TestCase):
def test_get_action_name(self):
middleware = RateLimitingMiddleware(simple_wsgi)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index fb282f1c9..49ce8c1b5 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -188,7 +188,11 @@ def stub_out_glance(stubs, initial_fixtures=None):
class FakeToken(object):
+ id = 0
+
def __init__(self, **kwargs):
+ FakeToken.id += 1
+ self.id = FakeToken.id
for k, v in kwargs.iteritems():
setattr(self, k, v)
@@ -203,19 +207,22 @@ class FakeAuthDatabase(object):
data = {}
@staticmethod
- def auth_get_token(context, token_hash):
+ def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
- def auth_create_token(context, token):
+ def auth_token_create(context, token):
fake_token = FakeToken(created_at=datetime.datetime.now(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
+ FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
- def auth_destroy_token(context, token):
- if token.token_hash in FakeAuthDatabase.data:
- del FakeAuthDatabase.data['token_hash']
+ def auth_token_destroy(context, token_id):
+ token = FakeAuthDatabase.data.get('id_%i' % token_id)
+ if token and token.token_hash in FakeAuthDatabase.data:
+ del FakeAuthDatabase.data[token.token_hash]
+ del FakeAuthDatabase.data['id_%i' % token_id]
class FakeAuthManager(object):
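
FakeToken now hands out sequential ids from a class-level counter, and FakeAuthDatabase files each token under both its hash and an 'id_<n>' key so the renamed auth_token_destroy can find it by id alone. The dual-keyed store, reduced to a self-contained sketch:

    class FakeToken(object):
        id = 0  # class-level counter shared by all instances

        def __init__(self, token_hash):
            FakeToken.id += 1
            self.id = FakeToken.id
            self.token_hash = token_hash


    class FakeTokenStore(object):
        def __init__(self):
            self.data = {}

        def create(self, token_hash):
            token = FakeToken(token_hash)
            # Index by hash (for auth lookups) and by id (for deletion).
            self.data[token.token_hash] = token
            self.data['id_%i' % token.id] = token
            return token

        def destroy(self, token_id):
            token = self.data.get('id_%i' % token_id)
            if token and token.token_hash in self.data:
                del self.data[token.token_hash]
                del self.data['id_%i' % token_id]


    store = FakeTokenStore()
    tok = store.create('bacon')
    store.destroy(tok.id)
    assert store.data == {}
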
diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py
index 73120c31d..dfce1b127 100644
--- a/nova/tests/api/openstack/test_adminapi.py
+++ b/nova/tests/api/openstack/test_adminapi.py
@@ -15,13 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import stubout
import webob
from paste import urlmap
from nova import flags
+from nova import test
from nova.api import openstack
from nova.api.openstack import ratelimiting
from nova.api.openstack import auth
@@ -30,9 +30,10 @@ from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
-class AdminAPITest(unittest.TestCase):
+class AdminAPITest(test.TestCase):
def setUp(self):
+ super(AdminAPITest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
@@ -44,6 +45,7 @@ class AdminAPITest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ super(AdminAPITest, self).tearDown()
def test_admin_enabled(self):
FLAGS.allow_admin_api = True
@@ -58,8 +60,5 @@ class AdminAPITest(unittest.TestCase):
# We should still be able to access public operations.
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
# TODO: Confirm admin operations are unavailable.
-
-if __name__ == '__main__':
- unittest.main()
+ self.assertEqual(res.status_int, 200)
diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py
index db0fe1060..5112c486f 100644
--- a/nova/tests/api/openstack/test_api.py
+++ b/nova/tests/api/openstack/test_api.py
@@ -15,17 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import webob.exc
import webob.dec
from webob import Request
+from nova import test
from nova.api import openstack
from nova.api.openstack import faults
-class APITest(unittest.TestCase):
+class APITest(test.TestCase):
def _wsgi_app(self, inner_app):
# simpler version of the app than fakes.wsgi_app
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index 0dd65d321..ff8d42a14 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -16,7 +16,6 @@
# under the License.
import datetime
-import unittest
import stubout
import webob
@@ -27,12 +26,15 @@ import nova.api.openstack.auth
import nova.auth.manager
from nova import auth
from nova import context
+from nova import db
+from nova import test
from nova.tests.api.openstack import fakes
-class Test(unittest.TestCase):
+class Test(test.TestCase):
def setUp(self):
+ super(Test, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
@@ -45,6 +47,7 @@ class Test(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
fakes.fake_data_store = {}
+ super(Test, self).tearDown()
def test_authorize_user(self):
f = fakes.FakeAuthManager()
@@ -97,10 +100,10 @@ class Test(unittest.TestCase):
token_hash=token_hash,
created_at=datetime.datetime(1990, 1, 1))
- self.stubs.Set(fakes.FakeAuthDatabase, 'auth_destroy_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_destroy',
destroy_token_mock)
- self.stubs.Set(fakes.FakeAuthDatabase, 'auth_get_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_get',
bad_token)
req = webob.Request.blank('/v1.0/')
@@ -128,8 +131,36 @@ class Test(unittest.TestCase):
self.assertEqual(result.status, '401 Unauthorized')
-class TestLimiter(unittest.TestCase):
+class TestFunctional(test.TestCase):
+ def test_token_expiry(self):
+ ctx = context.get_admin_context()
+ tok = db.auth_token_create(ctx, dict(
+ token_hash='bacon',
+ cdn_management_url='',
+ server_management_url='',
+ storage_url='',
+ user_id='ham',
+ ))
+
+ db.auth_token_update(ctx, tok.token_hash, dict(
+ created_at=datetime.datetime(2000, 1, 1, 12, 0, 0),
+ ))
+
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-Token'] = 'bacon'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+ def test_token_doesnotexist(self):
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-Token'] = 'ham'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+
+class TestLimiter(test.TestCase):
def setUp(self):
+ super(TestLimiter, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
@@ -141,6 +172,7 @@ class TestLimiter(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
fakes.fake_data_store = {}
+ super(TestLimiter, self).tearDown()
def test_authorize_token(self):
f = fakes.FakeAuthManager()
@@ -161,7 +193,3 @@ class TestLimiter(unittest.TestCase):
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
-
-
-if __name__ == '__main__':
- unittest.main()
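
The two TestFunctional cases pin down the middleware's failure paths: a token whose created_at is far in the past must be rejected, and a token hash that is not in the database at all must be rejected, both with 401. The expiry side of that boils down to a timestamp comparison; the two-day lifetime in this sketch is an assumption for illustration, not a value taken from the middleware:

    import datetime

    TOKEN_LIFETIME = datetime.timedelta(days=2)  # assumed lifetime for the sketch


    def token_is_valid(token, now=None):
        """Reject unknown tokens and tokens older than the allowed lifetime."""
        if token is None:
            return False
        now = now or datetime.datetime.utcnow()
        return (now - token['created_at']) <= TOKEN_LIFETIME


    stale = {'token_hash': 'bacon',
             'created_at': datetime.datetime(2000, 1, 1, 12, 0, 0)}
    assert not token_is_valid(stale)  # expired -> 401 from the middleware
    assert not token_is_valid(None)   # unknown token -> 401 as well
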
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 9d9837cc9..59d850157 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -19,14 +19,14 @@
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
-import unittest
from webob import Request
+from nova import test
from nova.api.openstack.common import limited
-class LimiterTest(unittest.TestCase):
+class LimiterTest(test.TestCase):
"""
Unit tests for the `nova.api.openstack.common.limited` method which takes
in a list of items and, depending on the 'offset' and 'limit' GET params,
@@ -37,6 +37,7 @@ class LimiterTest(unittest.TestCase):
"""
Run before each test.
"""
+ super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index fda2b5ede..7667753f4 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -15,15 +15,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import webob
import webob.dec
import webob.exc
+from nova import test
from nova.api.openstack import faults
-class TestFaults(unittest.TestCase):
+class TestFaults(test.TestCase):
def test_fault_parts(self):
req = webob.Request.blank('/.xml')
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 1bdaea161..761265965 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -15,18 +15,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
import stubout
import webob
+from nova import test
import nova.api
from nova.api.openstack import flavors
from nova.tests.api.openstack import fakes
-class FlavorsTest(unittest.TestCase):
+class FlavorsTest(test.TestCase):
def setUp(self):
+ super(FlavorsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
@@ -36,6 +36,7 @@ class FlavorsTest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
+ super(FlavorsTest, self).tearDown()
def test_get_flavor_list(self):
req = webob.Request.blank('/v1.0/flavors')
@@ -43,6 +44,3 @@ class FlavorsTest(unittest.TestCase):
def test_get_flavor_by_id(self):
pass
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 8ab4d7569..e232bc3d5 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -22,7 +22,6 @@ and as a WSGI layer
import json
import datetime
-import unittest
import stubout
import webob
@@ -30,6 +29,7 @@ import webob
from nova import context
from nova import exception
from nova import flags
+from nova import test
from nova import utils
import nova.api.openstack
from nova.api.openstack import images
@@ -130,12 +130,13 @@ class BaseImageServiceTests(object):
self.assertEquals(1, num_images)
-class LocalImageServiceTest(unittest.TestCase,
+class LocalImageServiceTest(test.TestCase,
BaseImageServiceTests):
"""Tests the local image service"""
def setUp(self):
+ super(LocalImageServiceTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
@@ -145,14 +146,16 @@ class LocalImageServiceTest(unittest.TestCase,
self.service.delete_all()
self.service.delete_imagedir()
self.stubs.UnsetAll()
+ super(LocalImageServiceTest, self).tearDown()
-class GlanceImageServiceTest(unittest.TestCase,
+class GlanceImageServiceTest(test.TestCase,
BaseImageServiceTests):
"""Tests the local image service"""
def setUp(self):
+ super(GlanceImageServiceTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_glance(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
@@ -163,9 +166,10 @@ class GlanceImageServiceTest(unittest.TestCase,
def tearDown(self):
self.stubs.UnsetAll()
+ super(GlanceImageServiceTest, self).tearDown()
-class ImageControllerWithGlanceServiceTest(unittest.TestCase):
+class ImageControllerWithGlanceServiceTest(test.TestCase):
"""Test of the OpenStack API /images application controller"""
@@ -194,6 +198,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
'image_type': 'ramdisk'}]
def setUp(self):
+ super(ImageControllerWithGlanceServiceTest, self).setUp()
self.orig_image_service = FLAGS.image_service
FLAGS.image_service = 'nova.image.glance.GlanceImageService'
self.stubs = stubout.StubOutForTesting()
@@ -208,6 +213,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.image_service = self.orig_image_service
+ super(ImageControllerWithGlanceServiceTest, self).tearDown()
def test_get_image_index(self):
req = webob.Request.blank('/v1.0/images')
diff --git a/nova/tests/api/openstack/test_ratelimiting.py b/nova/tests/api/openstack/test_ratelimiting.py
index 4c9d6bc23..9ae90ee20 100644
--- a/nova/tests/api/openstack/test_ratelimiting.py
+++ b/nova/tests/api/openstack/test_ratelimiting.py
@@ -1,15 +1,16 @@
import httplib
import StringIO
import time
-import unittest
import webob
+from nova import test
import nova.api.openstack.ratelimiting as ratelimiting
-class LimiterTest(unittest.TestCase):
+class LimiterTest(test.TestCase):
def setUp(self):
+ super(LimiterTest, self).setUp()
self.limits = {
'a': (5, ratelimiting.PER_SECOND),
'b': (5, ratelimiting.PER_MINUTE),
@@ -83,9 +84,10 @@ class FakeLimiter(object):
return self._delay
-class WSGIAppTest(unittest.TestCase):
+class WSGIAppTest(test.TestCase):
def setUp(self):
+ super(WSGIAppTest, self).setUp()
self.limiter = FakeLimiter(self)
self.app = ratelimiting.WSGIApp(self.limiter)
@@ -206,7 +208,7 @@ def wire_HTTPConnection_to_WSGI(host, app):
httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
-class WSGIAppProxyTest(unittest.TestCase):
+class WSGIAppProxyTest(test.TestCase):
def setUp(self):
"""Our WSGIAppProxy is going to call across an HTTPConnection to a
@@ -218,6 +220,7 @@ class WSGIAppProxyTest(unittest.TestCase):
at the WSGIApp. And the limiter isn't real -- it's a fake that
behaves the way we tell it to.
"""
+ super(WSGIAppProxyTest, self).setUp()
self.limiter = FakeLimiter(self)
app = ratelimiting.WSGIApp(self.limiter)
wire_HTTPConnection_to_WSGI('100.100.100.100:80', app)
@@ -238,7 +241,3 @@ class WSGIAppProxyTest(unittest.TestCase):
self.limiter.mock('murder', 'brutus', None)
self.proxy.perform('stab', 'brutus')
self.assertRaises(AssertionError, shouldRaise)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index a7be0796e..78beb7df9 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -17,17 +17,18 @@
import datetime
import json
-import unittest
import stubout
import webob
from nova import db
from nova import flags
+from nova import test
import nova.api.openstack
from nova.api.openstack import servers
import nova.db.api
from nova.db.sqlalchemy.models import Instance
+from nova.db.sqlalchemy.models import InstanceMetadata
import nova.rpc
from nova.tests.api.openstack import fakes
@@ -64,6 +65,9 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
+ metadata = []
+ metadata.append(InstanceMetadata(key='seq', value=id))
+
if public_addresses == None:
public_addresses = list()
@@ -84,7 +88,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
"vcpus": 0,
"local_gb": 0,
"hostname": "",
- "host": "",
+ "host": None,
"instance_type": "",
"user_data": "",
"reservation_id": "",
@@ -95,7 +99,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
"availability_zone": "",
"display_name": "server%s" % id,
"display_description": "",
- "locked": False}
+ "locked": False,
+ "metadata": metadata}
instance["fixed_ip"] = {
"address": private_address,
@@ -108,9 +113,10 @@ def fake_compute_api(cls, req, id):
return True
-class ServersTest(unittest.TestCase):
+class ServersTest(test.TestCase):
def setUp(self):
+ super(ServersTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
@@ -141,6 +147,7 @@ class ServersTest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ super(ServersTest, self).tearDown()
def test_get_server_by_id(self):
req = webob.Request.blank('/v1.0/servers/1')
@@ -214,7 +221,8 @@ class ServersTest(unittest.TestCase):
"get_image_id_from_image_hash", image_id_from_hash)
body = dict(server=dict(
- name='server_test', imageId=2, flavorId=2, metadata={},
+ name='server_test', imageId=2, flavorId=2,
+ metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.0/servers')
req.method = 'POST'
@@ -289,10 +297,45 @@ class ServersTest(unittest.TestCase):
i = 0
for s in res_dict['servers']:
self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s['imageId'], 10)
+ self.assertEqual(s['metadata']['seq'], i)
i += 1
+ def test_get_all_server_details_with_host(self):
+ '''
+ We want to make sure that if two instances are on the same host, then
+ they return the same hostId. If two instances are on different hosts,
+ they should return different hostIds. In this test, there are 5
+ instances - 2 on one host and 3 on another.
+ '''
+
+ def stub_instance(id, user_id=1):
+ return Instance(id=id, state=0, image_id=10, user_id=user_id,
+ display_name='server%s' % id, host='host%s' % (id % 2))
+
+ def return_servers_with_host(context, user_id=1):
+ return [stub_instance(i) for i in xrange(5)]
+
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
+ return_servers_with_host)
+
+ req = webob.Request.blank('/v1.0/servers/detail')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ server_list = res_dict['servers']
+ host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
+ self.assertTrue(host_ids[0] and host_ids[1])
+ self.assertNotEqual(host_ids[0], host_ids[1])
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], host_ids[i % 2])
+ self.assertEqual(s['name'], 'server%d' % i)
+ self.assertEqual(s['imageId'], 10)
+
def test_server_pause(self):
FLAGS.allow_admin_api = True
body = dict(server=dict(
@@ -353,6 +396,18 @@ class ServersTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ def test_server_inject_network_info(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank('/v1.0/servers/1/inject_network_info')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
def test_server_diagnostics(self):
req = webob.Request.blank("/v1.0/servers/1/diagnostics")
req.method = "GET"
@@ -410,7 +465,3 @@ class ServersTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status, '202 Accepted')
self.assertEqual(self.server_delete_called, True)
-
-
-if __name__ == "__main__":
- unittest.main()
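
test_get_all_server_details_with_host fixes the hostId contract: instances on the same compute host share a hostId, instances on different hosts get different ones, and an instance with no host yet reports an empty string. Any stable, opaque digest of the host name satisfies that; the SHA-224 below is only one plausible choice, not necessarily what the servers controller uses:

    import hashlib


    def host_id_for(host):
        """Opaque, stable identifier for a compute host ('' if unscheduled)."""
        if not host:
            return ''
        return hashlib.sha224(host.encode('utf-8')).hexdigest()


    assert host_id_for(None) == ''
    assert host_id_for('host0') == host_id_for('host0')
    assert host_id_for('host0') != host_id_for('host1')
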
diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py
index c2fc3a203..b4de2ef41 100644
--- a/nova/tests/api/openstack/test_shared_ip_groups.py
+++ b/nova/tests/api/openstack/test_shared_ip_groups.py
@@ -15,19 +15,20 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
import stubout
+from nova import test
from nova.api.openstack import shared_ip_groups
-class SharedIpGroupsTest(unittest.TestCase):
+class SharedIpGroupsTest(test.TestCase):
def setUp(self):
+ super(SharedIpGroupsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.stubs.UnsetAll()
+ super(SharedIpGroupsTest, self).tearDown()
def test_get_shared_ip_groups(self):
pass
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index df497ef1b..555b206b9 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import stubout
import webob
@@ -22,6 +21,7 @@ import json
import nova.db
from nova import context
from nova import flags
+from nova import test
from nova.api.openstack import zones
from nova.tests.api.openstack import fakes
@@ -60,8 +60,9 @@ def zone_get_all(context):
password='qwerty')]
-class ZonesTest(unittest.TestCase):
+class ZonesTest(test.TestCase):
def setUp(self):
+ super(ZonesTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
@@ -81,6 +82,7 @@ class ZonesTest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ super(ZonesTest, self).tearDown()
def test_get_zone_list(self):
req = webob.Request.blank('/v1.0/zones')
@@ -134,7 +136,3 @@ class ZonesTest(unittest.TestCase):
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
self.assertFalse('username' in res_dict['zone'])
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 44e2d615c..2c7852214 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -21,7 +21,7 @@
Test WSGI basics and provide some helper functions for other WSGI tests.
"""
-import unittest
+from nova import test
import routes
import webob
@@ -29,7 +29,7 @@ import webob
from nova import wsgi
-class Test(unittest.TestCase):
+class Test(test.TestCase):
def test_debug(self):
@@ -92,7 +92,7 @@ class Test(unittest.TestCase):
self.assertNotEqual(result.body, "123")
-class SerializerTest(unittest.TestCase):
+class SerializerTest(test.TestCase):
def match(self, url, accept, expect):
input_dict = dict(servers=dict(a=(2, 3)))
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 1097488ec..cbd949477 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -29,8 +29,8 @@ FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
-FLAGS.network_size = 16
-FLAGS.num_networks = 5
+FLAGS.network_size = 8
+FLAGS.num_networks = 2
FLAGS.fake_network = True
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
@@ -39,5 +39,5 @@ FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
FLAGS.iscsi_num_targets = 8
FLAGS.verbose = True
-FLAGS.sql_connection = 'sqlite:///nova.sqlite'
+FLAGS.sqlite_db = "tests.sqlite"
FLAGS.use_ipv6 = True
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index f182b857a..3ff8d7ce5 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -26,12 +26,40 @@ def stubout_glance_client(stubs, cls):
class FakeGlance(object):
+ IMAGE_MACHINE = 1
+ IMAGE_KERNEL = 2
+ IMAGE_RAMDISK = 3
+ IMAGE_RAW = 4
+ IMAGE_VHD = 5
+
+ IMAGE_FIXTURES = {
+ IMAGE_MACHINE: {
+ 'image_meta': {'name': 'fakemachine', 'size': 0,
+ 'type': 'machine'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_KERNEL: {
+ 'image_meta': {'name': 'fakekernel', 'size': 0,
+ 'type': 'kernel'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAMDISK: {
+ 'image_meta': {'name': 'fakeramdisk', 'size': 0,
+ 'type': 'ramdisk'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAW: {
+ 'image_meta': {'name': 'fakeraw', 'size': 0,
+ 'type': 'raw'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_VHD: {
+ 'image_meta': {'name': 'fakevhd', 'size': 0,
+ 'type': 'vhd'},
+ 'image_data': StringIO.StringIO('')}}
+
def __init__(self, host, port=None, use_ssl=False):
pass
- def get_image(self, image):
- meta = {
- 'size': 0,
- }
- image_file = StringIO.StringIO('')
- return meta, image_file
+ def get_image_meta(self, image_id):
+ return self.IMAGE_FIXTURES[image_id]['image_meta']
+
+ def get_image(self, image_id):
+ image = self.IMAGE_FIXTURES[image_id]
+ return image['image_meta'], image['image_data']
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index da86e6e11..5a1be08eb 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -311,4 +311,5 @@ class S3APITestCase(test.TestCase):
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
+ super(S3APITestCase, self).tearDown()
return defer.DeferredList([stop_listening])
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index fa27825cd..d5c54a1c3 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -20,6 +20,7 @@
import boto
from boto.ec2 import regioninfo
+import datetime
import httplib
import random
import StringIO
@@ -127,6 +128,28 @@ class ApiEc2TestCase(test.TestCase):
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
+ def test_return_valid_isoformat(self):
+ """
+ Ensure that the ec2 api returns datetime in xs:dateTime
+ (which apparently isn't datetime.isoformat())
+ NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
+ """
+ conv = apirequest._database_to_isoformat
+ # sqlite database representation with microseconds
+ time_to_convert = datetime.datetime.strptime(
+ "2011-02-21 20:14:10.634276",
+ "%Y-%m-%d %H:%M:%S.%f")
+ self.assertEqual(
+ conv(time_to_convert),
+ '2011-02-21T20:14:10Z')
+ # mysql database representation (no microseconds)
+ time_to_convert = datetime.datetime.strptime(
+ "2011-02-21 19:56:18",
+ "%Y-%m-%d %H:%M:%S")
+ self.assertEqual(
+ conv(time_to_convert),
+ '2011-02-21T19:56:18Z')
+
def test_xmlns_version_matches_request_version(self):
self.expect_http(api_version='2010-10-30')
self.mox.ReplayAll()
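
test_return_valid_isoformat spells out the EC2 timestamp rule: values must come back as xs:dateTime strings of the form YYYY-MM-DDTHH:MM:SSZ, with any microseconds from the database dropped. A minimal conversion that matches the expected outputs (the real helper is apirequest._database_to_isoformat; this is only the shape of it):

    import datetime


    def database_to_isoformat(dt):
        """Render a naive UTC datetime as xs:dateTime, without microseconds."""
        return dt.strftime('%Y-%m-%dT%H:%M:%SZ')


    with_us = datetime.datetime(2011, 2, 21, 20, 14, 10, 634276)
    without_us = datetime.datetime(2011, 2, 21, 19, 56, 18)
    assert database_to_isoformat(with_us) == '2011-02-21T20:14:10Z'
    assert database_to_isoformat(without_us) == '2011-02-21T19:56:18Z'
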
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 35ffffb67..2a7817032 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -327,15 +327,6 @@ class AuthManagerTestCase(object):
class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
- def __init__(self, *args, **kwargs):
- AuthManagerTestCase.__init__(self)
- test.TestCase.__init__(self, *args, **kwargs)
- import nova.auth.fakeldap as fakeldap
- if FLAGS.flush_db:
- LOG.info("Flushing datastore")
- r = fakeldap.Store.instance()
- r.flushdb()
-
class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 445cc6e8b..061910013 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -65,18 +65,21 @@ class CloudTestCase(test.TestCase):
self.cloud = cloud.CloudController()
# set up services
- self.compute = service.Service.create(binary='nova-compute')
- self.compute.start()
- self.network = service.Service.create(binary='nova-network')
- self.network.start()
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
+ host = self.network.get_network_host(self.context.elevated())
def tearDown(self):
+ network_ref = db.project_get_network(self.context,
+ self.project.id)
+ db.network_disassociate(self.context, network_ref['id'])
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.compute.kill()
@@ -102,7 +105,7 @@ class CloudTestCase(test.TestCase):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
- 'host': FLAGS.host})
+ 'host': self.network.host})
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.cloud.release_address(self.context,
@@ -115,9 +118,9 @@ class CloudTestCase(test.TestCase):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
- 'host': FLAGS.host})
+ 'host': self.network.host})
self.cloud.allocate_address(self.context)
- inst = db.instance_create(self.context, {'host': FLAGS.host})
+ inst = db.instance_create(self.context, {'host': self.compute.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
ec2_id = cloud.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
@@ -133,6 +136,22 @@ class CloudTestCase(test.TestCase):
db.instance_destroy(self.context, inst['id'])
db.floating_ip_destroy(self.context, address)
+ def test_describe_security_groups(self):
+ """Makes sure describe_security_groups works and filters results."""
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context)
+ # NOTE(vish): should have the default group as well
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ result = self.cloud.describe_security_groups(self.context,
+ group_name=[sec['name']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ db.security_group_destroy(self.context, sec['id'])
+
def test_describe_volumes(self):
"""Makes sure describe_volumes works and filters results."""
vol1 = db.volume_create(self.context, {})
@@ -203,27 +222,32 @@ class CloudTestCase(test.TestCase):
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)
+ greenthread.sleep(0.3)
instance_id = rv['instancesSet'][0]['instanceId']
output = self.cloud.get_console_output(context=self.context,
- instance_id=[instance_id])
+ instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
rv = self.cloud.terminate_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
def test_ajax_console(self):
+ image_id = FLAGS.default_image
kwargs = {'image_id': image_id}
- rv = yield self.cloud.run_instances(self.context, **kwargs)
+ rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
- output = yield self.cloud.get_console_output(context=self.context,
- instance_id=[instance_id])
- self.assertEquals(b64decode(output['output']),
- 'http://fakeajaxconsole.com/?token=FAKETOKEN')
+ greenthread.sleep(0.3)
+ output = self.cloud.get_ajax_console(context=self.context,
+ instance_id=[instance_id])
+ self.assertEquals(output['url'],
+ '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url)
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
- rv = yield self.cloud.terminate_instances(self.context, [instance_id])
+ rv = self.cloud.terminate_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
def test_key_generation(self):
result = self._create_key('test')
@@ -286,70 +310,6 @@ class CloudTestCase(test.TestCase):
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
- def test_describe_instances(self):
- """Makes sure describe_instances works."""
- instance1 = db.instance_create(self.context, {'host': 'host2'})
- comp1 = db.service_create(self.context, {'host': 'host2',
- 'availability_zone': 'zone1',
- 'topic': "compute"})
- result = self.cloud.describe_instances(self.context)
- self.assertEqual(result['reservationSet'][0]
- ['instancesSet'][0]
- ['placement']['availabilityZone'], 'zone1')
- db.instance_destroy(self.context, instance1['id'])
- db.service_destroy(self.context, comp1['id'])
-
- def test_instance_update_state(self):
- # TODO(termie): what is this code even testing?
- def instance(num):
- return {
- 'reservation_id': 'r-1',
- 'instance_id': 'i-%s' % num,
- 'image_id': 'ami-%s' % num,
- 'private_dns_name': '10.0.0.%s' % num,
- 'dns_name': '10.0.0%s' % num,
- 'ami_launch_index': str(num),
- 'instance_type': 'fake',
- 'availability_zone': 'fake',
- 'key_name': None,
- 'kernel_id': 'fake',
- 'ramdisk_id': 'fake',
- 'groups': ['default'],
- 'product_codes': None,
- 'state': 0x01,
- 'user_data': ''}
- rv = self.cloud._format_describe_instances(self.context)
- logging.error(str(rv))
- self.assertEqual(len(rv['reservationSet']), 0)
-
- # simulate launch of 5 instances
- # self.cloud.instances['pending'] = {}
- #for i in xrange(5):
- # inst = instance(i)
- # self.cloud.instances['pending'][inst['instance_id']] = inst
-
- #rv = self.cloud._format_instances(self.admin)
- #self.assert_(len(rv['reservationSet']) == 1)
- #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
- # report 4 nodes each having 1 of the instances
- #for i in xrange(4):
- # self.cloud.update_state('instances',
- # {('node-%s' % i): {('i-%s' % i):
- # instance(i)}})
-
- # one instance should be pending still
- #self.assert_(len(self.cloud.instances['pending'].keys()) == 1)
-
- # check that the reservations collapse
- #rv = self.cloud._format_instances(self.admin)
- #self.assert_(len(rv['reservationSet']) == 1)
- #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
-
- # check that we can get metadata for each instance
- #for i in xrange(4):
- # data = self.cloud.get_metadata(instance(i)['private_dns_name'])
- # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i)
-
@staticmethod
def _fake_set_image_description(ctxt, image_id, description):
from nova.objectstore import handler
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index b049ac943..949b5e6eb 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -30,6 +30,7 @@ from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
+from nova.compute import instance_types
LOG = logging.getLogger('nova.tests.compute')
@@ -266,3 +267,10 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)
+
+ def test_get_by_flavor_id(self):
+ type = instance_types.get_by_flavor_id(1)
+ self.assertEqual(type, 'm1.tiny')
+
+ type = instance_types.get_by_flavor_id("1")
+ self.assertEqual(type, 'm1.tiny')
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
index 85bf94458..49ff24413 100644
--- a/nova/tests/test_console.py
+++ b/nova/tests/test_console.py
@@ -21,7 +21,6 @@ Tests For Console proxy.
"""
import datetime
-import logging
from nova import context
from nova import db
@@ -38,7 +37,6 @@ FLAGS = flags.FLAGS
class ConsoleTestCase(test.TestCase):
"""Test case for console proxy"""
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py
index 8a74b2296..b6bfab534 100644
--- a/nova/tests/test_direct.py
+++ b/nova/tests/test_direct.py
@@ -19,7 +19,6 @@
"""Tests for Direct API."""
import json
-import logging
import webob
@@ -53,6 +52,7 @@ class DirectTestCase(test.TestCase):
def tearDown(self):
direct.ROUTES = {}
+ super(DirectTestCase, self).tearDown()
def test_delegated_auth(self):
req = webob.Request.blank('/fake/context')
diff --git a/nova/tests/test_localization.py b/nova/tests/test_localization.py
index 6992773f5..393d71038 100644
--- a/nova/tests/test_localization.py
+++ b/nova/tests/test_localization.py
@@ -15,7 +15,6 @@
# under the License.
import glob
-import logging
import os
import re
import sys
diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py
index c2c9d7772..122351ff6 100644
--- a/nova/tests/test_log.py
+++ b/nova/tests/test_log.py
@@ -1,9 +1,12 @@
import cStringIO
from nova import context
+from nova import flags
from nova import log
from nova import test
+FLAGS = flags.FLAGS
+
def _fake_context():
return context.RequestContext(1, 1)
@@ -14,15 +17,11 @@ class RootLoggerTestCase(test.TestCase):
super(RootLoggerTestCase, self).setUp()
self.log = log.logging.root
- def tearDown(self):
- super(RootLoggerTestCase, self).tearDown()
- log.NovaLogger.manager.loggerDict = {}
-
def test_is_nova_instance(self):
self.assert_(isinstance(self.log, log.NovaLogger))
- def test_name_is_nova_root(self):
- self.assertEqual("nova.root", self.log.name)
+ def test_name_is_nova(self):
+ self.assertEqual("nova", self.log.name)
def test_handlers_have_nova_formatter(self):
formatters = []
@@ -45,25 +44,36 @@ class RootLoggerTestCase(test.TestCase):
log.audit("foo", context=_fake_context())
self.assert_(True) # didn't raise exception
+ def test_will_be_verbose_if_verbose_flag_set(self):
+ self.flags(verbose=True)
+ log.reset()
+ self.assertEqual(log.DEBUG, self.log.level)
+
+ def test_will_not_be_verbose_if_verbose_flag_not_set(self):
+ self.flags(verbose=False)
+ log.reset()
+ self.assertEqual(log.INFO, self.log.level)
+
class LogHandlerTestCase(test.TestCase):
def test_log_path_logdir(self):
- self.flags(logdir='/some/path')
- self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ self.flags(logdir='/some/path', logfile=None)
+ self.assertEquals(log._get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
def test_log_path_logfile(self):
self.flags(logfile='/some/path/foo-bar.log')
- self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ self.assertEquals(log._get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
def test_log_path_none(self):
- self.assertTrue(log.get_log_file_path(binary='foo-bar') is None)
+ self.flags(logdir=None, logfile=None)
+ self.assertTrue(log._get_log_file_path(binary='foo-bar') is None)
def test_log_path_logfile_overrides_logdir(self):
self.flags(logdir='/some/other/path',
logfile='/some/path/foo-bar.log')
- self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ self.assertEquals(log._get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
@@ -76,13 +86,15 @@ class NovaFormatterTestCase(test.TestCase):
logging_debug_format_suffix="--DBG")
self.log = log.logging.root
self.stream = cStringIO.StringIO()
- handler = log.StreamHandler(self.stream)
- self.log.addHandler(handler)
+ self.handler = log.StreamHandler(self.stream)
+ self.log.addHandler(self.handler)
+ self.level = self.log.level
self.log.setLevel(log.DEBUG)
def tearDown(self):
+ self.log.setLevel(self.level)
+ self.log.removeHandler(self.handler)
super(NovaFormatterTestCase, self).tearDown()
- log.NovaLogger.manager.loggerDict = {}
def test_uncontextualized_log(self):
self.log.info("foo")
@@ -102,30 +114,15 @@ class NovaFormatterTestCase(test.TestCase):
class NovaLoggerTestCase(test.TestCase):
def setUp(self):
super(NovaLoggerTestCase, self).setUp()
- self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
+ levels = FLAGS.default_log_levels
+ levels.append("nova-test=AUDIT")
+ self.flags(default_log_levels=levels,
+ verbose=True)
self.log = log.getLogger('nova-test')
- def tearDown(self):
- super(NovaLoggerTestCase, self).tearDown()
- log.NovaLogger.manager.loggerDict = {}
-
def test_has_level_from_flags(self):
self.assertEqual(log.AUDIT, self.log.level)
def test_child_log_has_level_of_parent_flag(self):
l = log.getLogger('nova-test.foo')
self.assertEqual(log.AUDIT, l.level)
-
-
-class VerboseLoggerTestCase(test.TestCase):
- def setUp(self):
- super(VerboseLoggerTestCase, self).setUp()
- self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)
- self.log = log.getLogger('nova.test')
-
- def tearDown(self):
- super(VerboseLoggerTestCase, self).tearDown()
- log.NovaLogger.manager.loggerDict = {}
-
- def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
- self.assertEqual(log.DEBUG, self.log.level)
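
The reworked log tests assert that log.reset() re-reads the verbose flag and leaves the root 'nova' logger at DEBUG when it is set and INFO when it is not, replacing the old special-case verbose logger. The behaviour being pinned down is a conditional setLevel; stripped down to the standard logging module:

    import logging


    def reset_log_level(logger, verbose):
        """Re-apply the level implied by the verbose flag (DEBUG when set)."""
        logger.setLevel(logging.DEBUG if verbose else logging.INFO)


    nova_log = logging.getLogger('nova')
    reset_log_level(nova_log, verbose=True)
    assert nova_log.level == logging.DEBUG
    reset_log_level(nova_log, verbose=False)
    assert nova_log.level == logging.INFO
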
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index 33c1777d5..e6da6112a 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -46,6 +46,8 @@ class ProjectTestCase(test.TestCase):
missing = set()
for contributor in contributors:
+ if contributor == 'nova-core':
+ continue
if not contributor in authors_file:
missing.add(contributor)
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 00f9323f3..ce1c77210 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -42,15 +42,13 @@ class NetworkTestCase(test.TestCase):
# flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_call=True,
- fake_network=True,
- network_size=16,
- num_networks=5)
+ fake_network=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.RequestContext(project=None, user=self.user)
- for i in range(5):
+ for i in range(FLAGS.num_networks):
name = 'project%s' % i
project = self.manager.create_project(name, 'netuser', name)
self.projects.append(project)
@@ -117,6 +115,9 @@ class NetworkTestCase(test.TestCase):
utils.to_global_ipv6(
network_ref['cidr_v6'],
instance_ref['mac_address']))
+ self._deallocate_address(0, address)
+ db.instance_destroy(context.get_admin_context(),
+ instance_ref['id'])
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
@@ -192,7 +193,7 @@ class NetworkTestCase(test.TestCase):
first = self._create_address(0)
lease_ip(first)
instance_ids = []
- for i in range(1, 5):
+ for i in range(1, FLAGS.num_networks):
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address = self._create_address(i, instance_ref['id'])
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 9548a8c13..1e42fddf3 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import compute
from nova import context
from nova import db
from nova import flags
@@ -87,6 +88,18 @@ class QuotaTestCase(test.TestCase):
num_instances = quota.allowed_instances(self.context, 100,
instance_types.INSTANCE_TYPES['m1.small'])
self.assertEqual(num_instances, 10)
+
+ # metadata_items
+ too_many_items = FLAGS.quota_metadata_items + 1000
+ num_metadata_items = quota.allowed_metadata_items(self.context,
+ too_many_items)
+ self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
+ db.quota_update(self.context, self.project.id, {'metadata_items': 5})
+ num_metadata_items = quota.allowed_metadata_items(self.context,
+ too_many_items)
+ self.assertEqual(num_metadata_items, 5)
+
+ # Cleanup
db.quota_destroy(self.context, self.project.id)
def test_too_many_instances(self):
@@ -151,3 +164,15 @@ class QuotaTestCase(test.TestCase):
self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
+
+ def test_too_many_metadata_items(self):
+ metadata = {}
+ for i in range(FLAGS.quota_metadata_items + 1):
+ metadata['key%s' % i] = 'value%s' % i
+ self.assertRaises(quota.QuotaError, compute.API().create,
+ self.context,
+ min_count=1,
+ max_count=1,
+ instance_type='m1.small',
+ image_id='fake',
+ metadata=metadata)
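
test_too_many_metadata_items expects compute.API().create to refuse a request whose metadata dict is larger than the metadata_items quota. The enforcement the test implies is a count-and-compare before any instance records are built; sketched here with a local QuotaError standing in for nova.quota's exception and a fixed allowance instead of a database lookup:

    class QuotaError(Exception):
        """Raised when a request would exceed a project quota."""


    def check_metadata_quota(metadata, allowed_items):
        """Reject a create request carrying more metadata items than allowed."""
        if metadata and len(metadata) > allowed_items:
            raise QuotaError('Metadata quota exceeded: %d > %d'
                             % (len(metadata), allowed_items))


    too_big = dict(('key%d' % i, 'value%d' % i) for i in range(129))
    try:
        check_metadata_quota(too_big, allowed_items=128)
    except QuotaError as exc:
        print('rejected: %s' % exc)
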
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 9d458244b..b6888c4d2 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -150,6 +150,7 @@ class SimpleDriverTestCase(test.TestCase):
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
+ super(SimpleDriverTestCase, self).tearDown()
def _create_instance(self, **kwargs):
"""Create a test instance"""
@@ -176,18 +177,8 @@ class SimpleDriverTestCase(test.TestCase):
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
- # NOTE(vish): constructing service without create method
- # because we are going to use it without queue
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
@@ -199,18 +190,8 @@ class SimpleDriverTestCase(test.TestCase):
def test_reports_enabled_hosts_as_up(self):
"""Ensures driver can find the hosts that are up"""
- # NOTE(vish): constructing service without create method
- # because we are going to use it without queue
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(2, len(hosts))
compute1.kill()
@@ -218,16 +199,8 @@ class SimpleDriverTestCase(test.TestCase):
def test_least_busy_host_gets_instance(self):
"""Ensures the host with less cores gets the next one"""
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance()
@@ -241,16 +214,8 @@ class SimpleDriverTestCase(test.TestCase):
def test_specific_host_gets_instance(self):
"""Ensures if you set availability_zone it launches on that zone"""
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance(availability_zone='nova:host1')
@@ -263,11 +228,7 @@ class SimpleDriverTestCase(test.TestCase):
compute2.kill()
def test_wont_schedule_if_specified_host_is_down(self):
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
+ compute1 = self.start_service('compute', host='host1')
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
@@ -282,11 +243,7 @@ class SimpleDriverTestCase(test.TestCase):
compute1.kill()
def test_will_schedule_on_disabled_host_if_specified(self):
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
+ compute1 = self.start_service('compute', host='host1')
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
instance_id2 = self._create_instance(availability_zone='nova:host1')
@@ -298,16 +255,8 @@ class SimpleDriverTestCase(test.TestCase):
def test_too_many_cores(self):
"""Ensures we don't go over max cores"""
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
instance_ids1 = []
instance_ids2 = []
for index in xrange(FLAGS.max_cores):
@@ -322,6 +271,7 @@ class SimpleDriverTestCase(test.TestCase):
self.scheduler.driver.schedule_run_instance,
self.context,
instance_id)
+ db.instance_destroy(self.context, instance_id)
for instance_id in instance_ids1:
compute1.terminate_instance(self.context, instance_id)
for instance_id in instance_ids2:
@@ -331,16 +281,8 @@ class SimpleDriverTestCase(test.TestCase):
def test_least_busy_host_gets_volume(self):
"""Ensures the host with less gigabytes gets the next one"""
- volume1 = service.Service('host1',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
- volume1.start()
- volume2 = service.Service('host2',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
- volume2.start()
+ volume1 = self.start_service('volume', host='host1')
+ volume2 = self.start_service('volume', host='host2')
volume_id1 = self._create_volume()
volume1.create_volume(self.context, volume_id1)
volume_id2 = self._create_volume()
@@ -354,16 +296,8 @@ class SimpleDriverTestCase(test.TestCase):
def test_too_many_gigabytes(self):
"""Ensures we don't go over max gigabytes"""
- volume1 = service.Service('host1',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
- volume1.start()
- volume2 = service.Service('host2',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
- volume2.start()
+ volume1 = self.start_service('volume', host='host1')
+ volume2 = self.start_service('volume', host='host2')
volume_ids1 = []
volume_ids2 = []
for index in xrange(FLAGS.max_gigabytes):
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index a67c8d1e8..45d9afa6c 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -50,13 +50,6 @@ class ExtendedService(service.Service):
class ServiceManagerTestCase(test.TestCase):
"""Test cases for Services"""
- def test_attribute_error_for_no_manager(self):
- serv = service.Service('test',
- 'test',
- 'test',
- 'nova.tests.test_service.FakeManager')
- self.assertRaises(AttributeError, getattr, serv, 'test_method')
-
def test_message_gets_to_manager(self):
serv = service.Service('test',
'test',
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
new file mode 100644
index 000000000..e237674e6
--- /dev/null
+++ b/nova/tests/test_test.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the testing base code."""
+
+from nova import rpc
+from nova import test
+
+
+class IsolationTestCase(test.TestCase):
+ """Ensure that things are cleaned up after failed tests.
+
+    These tests don't really do much here, but if isolation fails, a bunch
+    of other tests should fail.
+
+ """
+ def test_service_isolation(self):
+ self.start_service('compute')
+
+ def test_rpc_consumer_isolation(self):
+ connection = rpc.Connection.instance(new=True)
+ consumer = rpc.TopicConsumer(connection, topic='compute')
+ consumer.register_callback(
+ lambda x, y: self.fail('I should never be called'))
+ consumer.attach_to_eventlet()
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
new file mode 100644
index 000000000..34a407f1a
--- /dev/null
+++ b/nova/tests/test_utils.py
@@ -0,0 +1,174 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova import utils
+from nova import exception
+
+
+class GetFromPathTestCase(test.TestCase):
+ def test_tolerates_nones(self):
+ f = utils.get_from_path
+
+ input = []
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [None]
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': None}]
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': None}}]
+ self.assertEquals([{'b': None}], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}]
+ self.assertEquals([{'b': {'c': None}}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}, {'a': None}]
+ self.assertEquals([{'b': {'c': None}}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
+ self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ def test_does_select(self):
+ f = utils.get_from_path
+
+ input = [{'a': 'a_1'}]
+ self.assertEquals(['a_1'], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': 'b_1'}}]
+ self.assertEquals([{'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
+ self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}},
+ {'a': {'b': None}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}},
+ {'a': {'b': {'c': 'c_2'}}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
+ f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
+ self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c"))
+
+ self.assertEquals([], f(input, "a/b/c/d"))
+ self.assertEquals([], f(input, "c/a/b/d"))
+ self.assertEquals([], f(input, "i/r/t"))
+
+ def test_flattens_lists(self):
+ f = utils.get_from_path
+
+ input = [{'a': [1, 2, 3]}]
+ self.assertEquals([1, 2, 3], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': [1, 2, 3]}}]
+ self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
+ self.assertEquals([1, 2, 3], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': [1, 2, {'b': 'b_1'}]}]
+ self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
+
+ def test_bad_xpath(self):
+ f = utils.get_from_path
+
+ self.assertRaises(exception.Error, f, [], None)
+ self.assertRaises(exception.Error, f, [], "")
+ self.assertRaises(exception.Error, f, [], "/")
+ self.assertRaises(exception.Error, f, [], "/a")
+ self.assertRaises(exception.Error, f, [], "/a/")
+ self.assertRaises(exception.Error, f, [], "//")
+ self.assertRaises(exception.Error, f, [], "//a")
+ self.assertRaises(exception.Error, f, [], "a//a")
+ self.assertRaises(exception.Error, f, [], "a//a/")
+ self.assertRaises(exception.Error, f, [], "a/a/")
+
+ def test_real_failure1(self):
+ # Real world failure case...
+ # We weren't coping when the input was a Dictionary instead of a List
+ # This led to test_accepts_dictionaries
+ f = utils.get_from_path
+
+ inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
+ 'address': '192.168.0.3'},
+ 'hostname': ''}
+
+ private_ips = f(inst, 'fixed_ip/address')
+ public_ips = f(inst, 'fixed_ip/floating_ips/address')
+ self.assertEquals(['192.168.0.3'], private_ips)
+ self.assertEquals(['1.2.3.4'], public_ips)
+
+ def test_accepts_dictionaries(self):
+ f = utils.get_from_path
+
+ input = {'a': [1, 2, 3]}
+ self.assertEquals([1, 2, 3], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': {'b': [1, 2, 3]}}
+ self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
+ self.assertEquals([1, 2, 3], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': [1, 2, {'b': 'b_1'}]}
+ self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 6e5a0114b..f151ae911 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -204,11 +204,12 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_conn.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, testuri)
+ db.instance_destroy(user_context, instance_ref['id'])
def tearDown(self):
- super(LibvirtConnTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ super(LibvirtConnTestCase, self).tearDown()
class IptablesFirewallTestCase(test.TestCase):
@@ -365,6 +366,7 @@ class IptablesFirewallTestCase(test.TestCase):
'--dports 80:81 -j ACCEPT' % security_group_chain \
in self.out_rules,
"TCP port 80/81 acceptance rule wasn't added")
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
class NWFilterTestCase(test.TestCase):
@@ -388,6 +390,7 @@ class NWFilterTestCase(test.TestCase):
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ super(NWFilterTestCase, self).tearDown()
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
@@ -514,3 +517,4 @@ class NWFilterTestCase(test.TestCase):
self.fw.apply_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 6b8efc9d8..b9bb6d5b4 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -31,6 +31,7 @@ from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
@@ -167,6 +168,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
+ stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(VMOps, 'reset_network', reset_network)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
@@ -283,11 +285,17 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, None, None)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+
+ def test_spawn_vhd_glance(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, 2, 3)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
@@ -336,3 +344,63 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
+
+
+class XenAPIDetermineDiskImageTestCase(test.TestCase):
+ """
+ Unit tests for code that detects the ImageType
+ """
+ def setUp(self):
+ super(XenAPIDetermineDiskImageTestCase, self).setUp()
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+
+ class FakeInstance(object):
+ pass
+
+ self.fake_instance = FakeInstance()
+ self.fake_instance.id = 42
+
+ def assert_disk_type(self, disk_type):
+ dt = vm_utils.VMHelper.determine_disk_image_type(
+ self.fake_instance)
+ self.assertEqual(disk_type, dt)
+
+ def test_instance_disk(self):
+ """
+ If a kernel is specified then the image type is DISK (aka machine)
+ """
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+ self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
+ self.assert_disk_type(vm_utils.ImageType.DISK)
+
+ def test_instance_disk_raw(self):
+ """
+ If the kernel isn't specified, and we're not using Glance, then
+ DISK_RAW is assumed.
+ """
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_raw(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'raw'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_vhd(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'vhd'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 624995ada..9beab86e7 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -130,6 +130,12 @@ def stubout_stream_disk(stubs):
stubs.Set(vm_utils, '_stream_disk', f)
+def stubout_is_vdi_pv(stubs):
+ def f(_1):
+ return False
+ stubs.Set(vm_utils, '_is_vdi_pv', f)
+
+
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
@@ -171,6 +177,12 @@ class FakeSessionForVMTests(fake.SessionBase):
def VM_destroy(self, session_ref, vm_ref):
fake.destroy_vm(vm_ref)
+ def SR_scan(self, session_ref, sr_ref):
+ pass
+
+ def VDI_set_name_label(self, session_ref, vdi_ref, name_label):
+ pass
+
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """
diff --git a/nova/twistd.py b/nova/twistd.py
index 60ff7879a..c07ed991f 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -148,6 +148,7 @@ def WrapTwistedOptions(wrapped):
options.insert(0, '')
args = FLAGS(options)
+ logging.setup()
argv = args[1:]
# ignore subcommands
@@ -258,7 +259,6 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
- logging.basicConfig()
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
diff --git a/nova/utils.py b/nova/utils.py
index 42efa0008..0cf91e0cc 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -31,6 +32,7 @@ import string
import struct
import sys
import time
+import types
from xml.sax import saxutils
import re
import netaddr
@@ -55,7 +57,7 @@ def import_class(import_str):
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), exc:
- logging.debug(_('Inner Exception: %s'), exc)
+ LOG.debug(_('Inner Exception: %s'), exc)
raise exception.NotFound(_('Class %s cannot be found') % class_str)
@@ -499,3 +501,52 @@ def ensure_b64_encoding(val):
return val
except TypeError:
return base64.b64encode(val)
+
+
+def get_from_path(items, path):
+ """ Returns a list of items matching the specified path. Takes an
+ XPath-like expression e.g. prop1/prop2/prop3, and for each item in items,
+ looks up items[prop1][prop2][prop3]. Like XPath, if any of the
+ intermediate results are lists it will treat each list item individually.
+    A 'None' in items or any child expressions will be ignored; this function
+    will not throw because of a None (anywhere) in items.  The returned list
+ will contain no None values."""
+
+ if path is None:
+ raise exception.Error("Invalid mini_xpath")
+
+ (first_token, sep, remainder) = path.partition("/")
+
+ if first_token == "":
+ raise exception.Error("Invalid mini_xpath")
+
+ results = []
+
+ if items is None:
+ return results
+
+ if not isinstance(items, types.ListType):
+ # Wrap single objects in a list
+ items = [items]
+
+ for item in items:
+ if item is None:
+ continue
+ get_method = getattr(item, "get", None)
+ if get_method is None:
+ continue
+ child = get_method(first_token)
+ if child is None:
+ continue
+ if isinstance(child, types.ListType):
+ # Flatten intermediate lists
+ for x in child:
+ results.append(x)
+ else:
+ results.append(child)
+
+ if not sep:
+ # No more tokens
+ return results
+ else:
+ return get_from_path(results, remainder)
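A quick illustration of the traversal (the data below is made up; it mirrors the cases exercised by test_utils.py above):

    from nova import utils

    servers = [{'server': {'addresses': [{'addr': '10.0.0.3'},
                                         {'addr': '10.0.0.4'}]}},
               {'server': None}]
    # Intermediate lists are flattened and None entries are skipped
    assert utils.get_from_path(servers, 'server/addresses/addr') == \
        ['10.0.0.3', '10.0.0.4']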
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index c5565abfa..2bded07a4 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -38,6 +38,10 @@ flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
'block_size to use for dd')
+flags.DEFINE_integer('timeout_nbd', 10,
+ 'time to wait for a NBD device coming up')
+flags.DEFINE_integer('max_nbd_devices', 16,
+ 'maximum number of possible nbd devices')
def extend(image, size):
@@ -117,7 +121,7 @@ def _link_device(image, nbd):
utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
# NOTE(vish): this forks into another process, so give it a chance
     # to set up before continuing
- for i in xrange(10):
+ for i in xrange(FLAGS.timeout_nbd):
if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
return device
time.sleep(1)
@@ -139,7 +143,7 @@ def _unlink_device(device, nbd):
utils.execute('sudo losetup --detach %s' % device)
-_DEVICES = ['/dev/nbd%s' % i for i in xrange(16)]
+_DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)]
def _allocate_device():
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 92749f38a..4346dffc1 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -319,7 +319,9 @@ class FakeConnection(object):
return 'FAKE CONSOLE OUTPUT'
def get_ajax_console(self, instance):
- return 'http://fakeajaxconsole.com/?token=FAKETOKEN'
+ return {'token': 'FAKETOKEN',
+ 'host': 'fakeajaxconsole.com',
+ 'port': 6969}
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 80cc3035d..6d9aeb060 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -24,6 +24,7 @@ import pickle
import re
import time
import urllib
+import uuid
from xml.dom import minidom
from eventlet import event
@@ -63,11 +64,14 @@ class ImageType:
0 - kernel/ramdisk image (goes on dom0's filesystem)
1 - disk image (local SR, partitioned by objectstore plugin)
2 - raw disk image (local SR, NOT partitioned by plugin)
+ 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+            Linux, HVM assumed for Windows)
"""
KERNEL_RAMDISK = 0
DISK = 1
DISK_RAW = 2
+ DISK_VHD = 3
class VMHelper(HelperBase):
@@ -276,29 +280,35 @@ class VMHelper(HelperBase):
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
- return template_vm_ref, [template_vdi_uuid, parent_uuid]
+ template_vdi_uuids = {'image': parent_uuid,
+ 'snap': template_vdi_uuid}
+ return template_vm_ref, template_vdi_uuids
@classmethod
def upload_image(cls, session, instance_id, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
+        # NOTE(sirp): Currently we only support uploading images as VHD; there
+ # is no RAW equivalent (yet)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
- 'glance_port': FLAGS.glance_port}
+ 'glance_port': FLAGS.glance_port,
+ 'sr_path': get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
- task = session.async_call_plugin('glance', 'put_vdis', kwargs)
+ task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(instance_id, task)
@classmethod
- def fetch_image(cls, session, instance_id, image, user, project, type):
+ def fetch_image(cls, session, instance_id, image, user, project,
+ image_type):
"""
- type is interpreted as an ImageType instance
+ image_type is interpreted as an ImageType instance
Related flags:
xenapi_image_service = ['glance', 'objectstore']
glance_address = 'address for glance services'
@@ -308,35 +318,80 @@ class VMHelper(HelperBase):
if FLAGS.xenapi_image_service == 'glance':
return cls._fetch_image_glance(session, instance_id, image,
- access, type)
+ access, image_type)
else:
return cls._fetch_image_objectstore(session, instance_id, image,
- access, user.secret, type)
+ access, user.secret,
+ image_type)
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, access, type):
- sr = find_sr(session)
- if sr is None:
- raise exception.NotFound('Cannot find SR to write VDI to')
+ def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+ image_type):
+ LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
+ % locals())
+
+ sr_ref = safe_find_sr(session)
+
+ # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
+ # have the `uuid` module. To work around this, we generate the uuids
+ # here (under Python 2.6+) and pass them as arguments
+ uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
+
+ params = {'image_id': image,
+ 'glance_host': FLAGS.glance_host,
+ 'glance_port': FLAGS.glance_port,
+ 'uuid_stack': uuid_stack,
+ 'sr_path': get_sr_path(session)}
+
+ kwargs = {'params': pickle.dumps(params)}
+ task = session.async_call_plugin('glance', 'download_vhd', kwargs)
+ vdi_uuid = session.wait_for_task(instance_id, task)
+
+ scan_sr(session, instance_id, sr_ref)
+
+ # Set the name-label to ease debugging
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ name_label = get_name_label_for_image(image)
+ session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
+
+ LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
+ % locals())
+ return vdi_uuid
- c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ @classmethod
+ def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+ image_type):
+ """Fetch the image from Glance
+
+ NOTE:
+ Unlike _fetch_image_glance_vhd, this method does not use the Glance
+ plugin; instead, it streams the disks through domU to the VDI
+ directly.
+
+ """
+ # FIXME(sirp): Since the Glance plugin seems to be required for the
+        # VHD disk, it may be worth using the plugin for VHD, RAW, and
+        # DISK restores
+ sr_ref = safe_find_sr(session)
- meta, image_file = c.get_image(image)
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta, image_file = client.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
- if type == ImageType.DISK:
+
+ if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
- vdi_size, False)
+ name_label = get_name_label_for_image(image)
+ vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
with_vdi_attached_here(session, vdi, False,
lambda dev:
- _stream_disk(dev, type,
+ _stream_disk(dev, image_type,
virtual_size, image_file))
- if (type == ImageType.KERNEL_RAMDISK):
+ if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying VDI's
#content into proper path
LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
@@ -355,20 +410,87 @@ class VMHelper(HelperBase):
return session.get_xenapi().VDI.get_uuid(vdi)
@classmethod
+ def determine_disk_image_type(cls, instance):
+ """Disk Image Types are used to determine where the kernel will reside
+ within an image. To figure out which type we're dealing with, we use
+ the following rules:
+
+        1. If we're using Glance, we can use the image_type field to
+           determine the disk image type
+
+ 2. If we're not using Glance, then we need to deduce this based on
+ whether a kernel_id is specified.
+ """
+ def log_disk_format(image_type):
+ pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ ImageType.DISK: 'DISK',
+ ImageType.DISK_RAW: 'DISK_RAW',
+ ImageType.DISK_VHD: 'DISK_VHD'}
+ disk_format = pretty_format[image_type]
+ image_id = instance.image_id
+ instance_id = instance.id
+ LOG.debug(_("Detected %(disk_format)s format for image "
+ "%(image_id)s, instance %(instance_id)s") % locals())
+
+ def determine_from_glance():
+ glance_type2nova_type = {'machine': ImageType.DISK,
+ 'raw': ImageType.DISK_RAW,
+ 'vhd': ImageType.DISK_VHD,
+ 'kernel': ImageType.KERNEL_RAMDISK,
+ 'ramdisk': ImageType.KERNEL_RAMDISK}
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta = client.get_image_meta(instance.image_id)
+ type_ = meta['type']
+ try:
+ return glance_type2nova_type[type_]
+ except KeyError:
+ raise exception.NotFound(
+ _("Unrecognized image type '%(type_)s'") % locals())
+
+ def determine_from_instance():
+ if instance.kernel_id:
+ return ImageType.DISK
+ else:
+ return ImageType.DISK_RAW
+
+ # FIXME(sirp): can we unify the ImageService and xenapi_image_service
+ # abstractions?
+ if FLAGS.xenapi_image_service == 'glance':
+ image_type = determine_from_glance()
+ else:
+ image_type = determine_from_instance()
+
+ log_disk_format(image_type)
+ return image_type
+
+ @classmethod
+ def _fetch_image_glance(cls, session, instance_id, image, access,
+ image_type):
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(
+ session, instance_id, image, access, image_type)
+ else:
+ return cls._fetch_image_glance_disk(
+ session, instance_id, image, access, image_type)
+
+ @classmethod
def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, type):
+ secret, image_type):
url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
+ if image_type == ImageType.KERNEL_RAMDISK:
+ fn = 'get_kernel'
+ else:
+ fn = 'get_vdi'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if type != ImageType.KERNEL_RAMDISK:
+ if image_type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
- if type == ImageType.DISK_RAW:
+ if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
uuid = session.wait_for_task(instance_id, task)
@@ -376,6 +498,9 @@ class VMHelper(HelperBase):
@classmethod
def lookup_image(cls, session, instance_id, vdi_ref):
+ """
+ Determine if VDI is using a PV kernel
+ """
if FLAGS.xenapi_image_service == 'glance':
return cls._lookup_image_glance(session, vdi_ref)
else:
@@ -400,19 +525,7 @@ class VMHelper(HelperBase):
@classmethod
def _lookup_image_glance(cls, session, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
-
- def is_vdi_pv(dev):
- LOG.debug(_("Running pygrub against %s"), dev)
- output = os.popen('pygrub -qn /dev/%s' % dev)
- for line in output.readlines():
- #try to find kernel string
- m = re.search('(?<=kernel:)/.*(?:>)', line)
- if m and m.group(0).find('xen') != -1:
- LOG.debug(_("Found Xen kernel %s") % m.group(0))
- return True
- LOG.debug(_("No Xen kernel found. Booting HVM."))
- return False
- return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)
+ return with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
@classmethod
def lookup(cls, session, i):
@@ -599,7 +712,18 @@ def get_vdi_for_vm_safely(session, vm_ref):
return vdi_ref, vdi_rec
+def safe_find_sr(session):
+ """Same as find_sr except raises a NotFound exception if SR cannot be
+ determined
+ """
+ sr_ref = find_sr(session)
+ if sr_ref is None:
+ raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ return sr_ref
+
+
def find_sr(session):
+ """Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
srs = session.get_xenapi().SR.get_all()
for sr in srs:
@@ -614,6 +738,18 @@ def find_sr(session):
return None
+def get_sr_path(session):
+ """Return the path to our storage repository
+
+ This is used when we're dealing with VHDs directly, either by taking
+ snapshots or by restoring an image in the DISK_VHD format.
+ """
+ sr_ref = safe_find_sr(session)
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+
+
def remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
@@ -714,9 +850,22 @@ def get_this_vm_ref(session):
return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())
-def _stream_disk(dev, type, virtual_size, image_file):
+def _is_vdi_pv(dev):
+ LOG.debug(_("Running pygrub against %s"), dev)
+ output = os.popen('pygrub -qn /dev/%s' % dev)
+ for line in output.readlines():
+ #try to find kernel string
+ m = re.search('(?<=kernel:)/.*(?:>)', line)
+ if m and m.group(0).find('xen') != -1:
+ LOG.debug(_("Found Xen kernel %s") % m.group(0))
+ return True
+ LOG.debug(_("No Xen kernel found. Booting HVM."))
+ return False
+
+
+def _stream_disk(dev, image_type, virtual_size, image_file):
offset = 0
- if type == ImageType.DISK:
+ if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
@@ -745,3 +894,8 @@ def _write_partition(virtual_size, dev):
(dest, primary_first, primary_last))
LOG.debug(_('Writing partition table %s done.'), dest)
+
+
+def get_name_label_for_image(image):
+ # TODO(sirp): This should eventually be the URI for the Glance image
+ return _('Glance image %s') % image
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 0168681f6..bc39aa140 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -80,70 +80,40 @@ class VMOps(object):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- #if kernel is not present we must download a raw disk
- if instance.kernel_id:
- disk_image_type = ImageType.DISK
- else:
- disk_image_type = ImageType.DISK_RAW
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+
vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
instance.image_id, user, project, disk_image_type)
+
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- #Have a look at the VDI and see if it has a PV kernel
+
pv_kernel = False
- if not instance.kernel_id:
+ if disk_image_type == ImageType.DISK_RAW:
+ #Have a look at the VDI and see if it has a PV kernel
pv_kernel = VMHelper.lookup_image(self._session, instance.id,
vdi_ref)
+ elif disk_image_type == ImageType.DISK_VHD:
+ # TODO(sirp): Assuming PV for now; this will need to be
+ # configurable as Windows will use HVM.
+ pv_kernel = True
+
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+
ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
- # write network info
- admin_context = context.get_admin_context()
-
- # TODO(tr3buchet) - remove comment in multi-nic
- # I've decided to go ahead and consider multiple IPs and networks
- # at this stage even though they aren't implemented because these will
- # be needed for multi-nic and there was no sense writing it for single
- # network/single IP and then having to turn around and re-write it
- IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
- for network in db.network_get_all_by_instance(admin_context,
- instance['id']):
- network_IPs = [ip for ip in IPs if ip.network_id == network.id]
-
- def ip_dict(ip):
- return {'netmask': network['netmask'],
- 'enabled': '1',
- 'ip': ip.address}
-
- mac_id = instance.mac_address.replace(':', '')
- location = 'vm-data/networking/%s' % mac_id
- mapping = {'label': network['label'],
- 'gateway': network['gateway'],
- 'mac': instance.mac_address,
- 'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
- self.write_to_param_xenstore(vm_ref, {location: mapping})
-
- # TODO(tr3buchet) - remove comment in multi-nic
- # this bit here about creating the vifs will be updated
- # in multi-nic to handle multiple IPs on the same network
- # and multiple networks
- # for now it works as there is only one of each
- bridge = network['bridge']
- network_ref = \
- NetworkHelper.find_network_with_bridge(self._session, bridge)
-
- if network_ref:
- VMHelper.create_vif(self._session, vm_ref,
- network_ref, instance.mac_address)
+ # inject_network_info and create vifs
+ networks = self.inject_network_info(instance)
+ self.create_vifs(instance, networks)
LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
@@ -193,7 +163,7 @@ class VMOps(object):
timer.f = _wait_for_boot
- # call reset networking
+        # call reset_network to configure networking from the xenstore
self.reset_network(instance)
return timer.start(interval=0.5, now=True)
@@ -275,7 +245,8 @@ class VMOps(object):
VMHelper.upload_image(
self._session, instance.id, template_vdi_uuids, image_id)
finally:
- self._destroy(instance, template_vm_ref, shutdown=False)
+ self._destroy(instance, template_vm_ref, shutdown=False,
+ destroy_kernel_ramdisk=False)
logging.debug(_("Finished snapshot and upload for VM %s"), instance)
@@ -357,6 +328,9 @@ class VMOps(object):
locals())
return
+ instance_id = instance.id
+ LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
+ % locals())
try:
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
@@ -365,6 +339,9 @@ class VMOps(object):
def _destroy_vdis(self, instance, vm):
"""Destroys all VDIs associated with a VM """
+ instance_id = instance.id
+ LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
+ % locals())
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
if not vdis:
@@ -377,29 +354,56 @@ class VMOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def _destroy_kernel_ramdisk(self, instance, vm):
+ """
+ Three situations can occur:
+
+ 1. We have neither a ramdisk nor a kernel, in which case we are a
+ RAW image and can omit this step
+
+ 2. We have one or the other, in which case, we should flag as an
+ error
+
+ 3. We have both, in which case we safely remove both the kernel
+ and the ramdisk.
+ """
+ instance_id = instance.id
+ if not instance.kernel_id and not instance.ramdisk_id:
+ # 1. No kernel or ramdisk
+ LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
+ "skipping kernel and ramdisk deletion") % locals())
+ return
+
+ if not (instance.kernel_id and instance.ramdisk_id):
+ # 2. We only have kernel xor ramdisk
+ raise exception.NotFound(
+ _("Instance %(instance_id)s has a kernel or ramdisk but not "
+                  "both") % locals())
+
+ # 3. We have both kernel and ramdisk
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+ self._session, vm)
+
+ LOG.debug(_("Removing kernel/ramdisk files"))
+
+ args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(instance.id, task)
+
+ LOG.debug(_("kernel/ramdisk files removed"))
+
def _destroy_vm(self, instance, vm):
"""Destroys a VM record """
+ instance_id = instance.id
try:
- kernel = None
- ramdisk = None
- if instance.kernel_id or instance.ramdisk_id:
- (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
- self._session, vm)
- task1 = self._session.call_xenapi('Async.VM.destroy', vm)
- LOG.debug(_("Removing kernel/ramdisk files"))
- fn = "remove_kernel_ramdisk"
- args = {}
- if kernel:
- args['kernel-file'] = kernel
- if ramdisk:
- args['ramdisk-file'] = ramdisk
- task2 = self._session.async_call_plugin('glance', fn, args)
- self._session.wait_for_task(instance.id, task1)
- self._session.wait_for_task(instance.id, task2)
- LOG.debug(_("kernel/ramdisk files removed"))
+ task = self._session.call_xenapi('Async.VM.destroy', vm)
+ self._session.wait_for_task(instance_id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+
def destroy(self, instance):
"""
Destroy VM instance
@@ -407,26 +411,31 @@ class VMOps(object):
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
+ instance_id = instance.id
+ LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
vm = VMHelper.lookup(self._session, instance.name)
return self._destroy(instance, vm, shutdown=True)
- def _destroy(self, instance, vm, shutdown=True):
+ def _destroy(self, instance, vm, shutdown=True,
+ destroy_kernel_ramdisk=True):
"""
Destroys VM instance by performing:
- 1. A shutdown if requested
- 2. Destroying associated VDIs
- 3. Destroying that actual VM record
+ 1. A shutdown if requested
+ 2. Destroying associated VDIs
+ 3. Destroying kernel and ramdisk files (if necessary)
+ 4. Destroying that actual VM record
"""
if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ LOG.warning(_("VM is not present, skipping destroy..."))
return
if shutdown:
self._shutdown(instance, vm)
self._destroy_vdis(instance, vm)
+ if destroy_kernel_ramdisk:
+ self._destroy_kernel_ramdisk(instance, vm)
self._destroy_vm(instance, vm)
def _wait_with_callback(self, instance_id, task, callback):
@@ -483,6 +492,73 @@ class VMOps(object):
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
+ def inject_network_info(self, instance):
+ """
+ Generate the network info and make calls to place it into the
+ xenstore and the xenstore param list
+
+ """
+ # TODO(tr3buchet) - remove comment in multi-nic
+ # I've decided to go ahead and consider multiple IPs and networks
+ # at this stage even though they aren't implemented because these will
+ # be needed for multi-nic and there was no sense writing it for single
+ # network/single IP and then having to turn around and re-write it
+ vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
+ logging.debug(_("injecting network info to xenstore for vm: |%s|"),
+ vm_opaque_ref)
+ admin_context = context.get_admin_context()
+ IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
+ networks = db.network_get_all_by_instance(admin_context,
+ instance['id'])
+ for network in networks:
+ network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+
+ def ip_dict(ip):
+ return {'netmask': network['netmask'],
+ 'enabled': '1',
+ 'ip': ip.address}
+
+ mac_id = instance.mac_address.replace(':', '')
+ location = 'vm-data/networking/%s' % mac_id
+ mapping = {'label': network['label'],
+ 'gateway': network['gateway'],
+ 'mac': instance.mac_address,
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_IPs]}
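+            # For illustration only (hypothetical values), the param entry
+            # written below looks roughly like:
+            #   vm-data/networking/02163e331337 =
+            #       {'label': 'public', 'gateway': '10.0.0.1',
+            #        'mac': '02:16:3e:33:13:37', 'dns': ['8.8.8.8'],
+            #        'ips': [{'ip': '10.0.0.3', 'netmask': '255.255.255.0',
+            #                 'enabled': '1'}]}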
+ self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})
+ try:
+ self.write_to_xenstore(vm_opaque_ref, location,
+                                       mapping)
+ except KeyError:
+ # catch KeyError for domid if instance isn't running
+ pass
+
+ return networks
+
+ def create_vifs(self, instance, networks=None):
+ """
+ Creates vifs for an instance
+
+ """
+ vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
+ logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref)
+        if networks is None:
+            admin_context = context.get_admin_context()
+            networks = db.network_get_all_by_instance(admin_context,
+                                                      instance['id'])
+ # TODO(tr3buchet) - remove comment in multi-nic
+ # this bit here about creating the vifs will be updated
+ # in multi-nic to handle multiple IPs on the same network
+ # and multiple networks
+ # for now it works as there is only one of each
+ for network in networks:
+ bridge = network['bridge']
+ network_ref = \
+ NetworkHelper.find_network_with_bridge(self._session, bridge)
+
+ if network_ref:
+ VMHelper.create_vif(self._session, vm_opaque_ref,
+ network_ref, instance.mac_address)
+
def reset_network(self, instance):
"""
Creates uuid arg to pass to make_agent_call and calls it.
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c2f65699f..60bfe0a9f 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -100,6 +100,8 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts',
5,
'Max number of times to poll for VHD to coalesce.'
' Used only if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount',
+ 'Base path to the storage repository')
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
@@ -198,6 +200,10 @@ class XenAPIConnection(object):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
+ def inject_network_info(self, instance):
+ """inject network info for specified instance"""
+ self._vmops.inject_network_info(instance)
+
def get_info(self, instance_id):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 82f4c2f54..e3744c790 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -21,6 +21,7 @@ Drivers for volumes.
"""
import time
+import os
from nova import exception
from nova import flags
@@ -36,6 +37,8 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
flags.DEFINE_string('num_shell_tries', 3,
'number of times to attempt to run flakey shell commands')
+flags.DEFINE_integer('num_iscsi_scan_tries', 3,
+ 'number of times to rescan iSCSI target to find volume')
flags.DEFINE_integer('num_shelves',
100,
'Number of vblade shelves')
@@ -88,7 +91,8 @@ class VolumeDriver(object):
% FLAGS.volume_group)
def create_volume(self, volume):
- """Creates a logical volume."""
+ """Creates a logical volume. Can optionally return a Dictionary of
+ changes to the volume object to be persisted."""
if int(volume['size']) == 0:
sizestr = '100M'
else:
@@ -123,7 +127,8 @@ class VolumeDriver(object):
raise NotImplementedError()
def create_export(self, context, volume):
- """Exports the volume."""
+ """Exports the volume. Can optionally return a Dictionary of changes
+ to the volume object to be persisted."""
raise NotImplementedError()
def remove_export(self, context, volume):
@@ -222,7 +227,18 @@ class FakeAOEDriver(AOEDriver):
class ISCSIDriver(VolumeDriver):
- """Executes commands relating to ISCSI volumes."""
+ """Executes commands relating to ISCSI volumes.
+
+ We make use of model provider properties as follows:
+
+ :provider_location: if present, contains the iSCSI target information
+                           in the same format as an iscsiadm discovery,
+                           i.e. '<ip>:<port>,<portal> <target IQN>'
+
+ :provider_auth: if present, contains a space-separated triple:
+ '<auth method> <auth username> <auth password>'.
+ `CHAP` is the only auth_method in use at the moment.
+ """
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
@@ -294,40 +310,149 @@ class ISCSIDriver(VolumeDriver):
self._execute("sudo ietadm --op delete --tid=%s" %
iscsi_target)
- def _get_name_and_portal(self, volume):
- """Gets iscsi name and portal from volume name and host."""
+ def _do_iscsi_discovery(self, volume):
+ #TODO(justinsb): Deprecate discovery and use stored info
+ #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
+ LOG.warn(_("ISCSI provider_location not stored, using discovery"))
+
volume_name = volume['name']
- host = volume['host']
+
(out, _err) = self._execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" % host)
+ "sendtargets -p %s" % (volume['host']))
for target in out.splitlines():
if FLAGS.iscsi_ip_prefix in target and volume_name in target:
- (location, _sep, iscsi_name) = target.partition(" ")
- break
- iscsi_portal = location.split(",")[0]
- return (iscsi_name, iscsi_portal)
+ return target
+ return None
+
+ def _get_iscsi_properties(self, volume):
+ """Gets iscsi configuration
+
+ We ideally get saved information in the volume entity, but fall back
+        to discovery if need be. Discovery may be completely removed in the
+        future. The properties are:
+
+ :target_discovered: boolean indicating whether discovery was used
+
+ :target_iqn: the IQN of the iSCSI target
+
+ :target_portal: the portal of the iSCSI target
+
+ :auth_method:, :auth_username:, :auth_password:
+
+ the authentication details. Right now, either auth_method is not
+ present meaning no authentication, or auth_method == `CHAP`
+ meaning use CHAP with the specified credentials.
+ """
+
+ properties = {}
+
+ location = volume['provider_location']
+
+ if location:
+ # provider_location is the same format as iSCSI discovery output
+ properties['target_discovered'] = False
+ else:
+ location = self._do_iscsi_discovery(volume)
+
+ if not location:
+ raise exception.Error(_("Could not find iSCSI export "
+                                        "for volume %s") %
+ (volume['name']))
+
+ LOG.debug(_("ISCSI Discovery: Found %s") % (location))
+ properties['target_discovered'] = True
+
+ (iscsi_target, _sep, iscsi_name) = location.partition(" ")
+
+ iscsi_portal = iscsi_target.split(",")[0]
+
+ properties['target_iqn'] = iscsi_name
+ properties['target_portal'] = iscsi_portal
+
+ auth = volume['provider_auth']
+
+ if auth:
+ (auth_method, auth_username, auth_secret) = auth.split()
+
+ properties['auth_method'] = auth_method
+ properties['auth_username'] = auth_username
+ properties['auth_password'] = auth_secret
+
+ return properties
+
+ def _run_iscsiadm(self, iscsi_properties, iscsi_command):
+ command = ("sudo iscsiadm -m node -T %s -p %s %s" %
+ (iscsi_properties['target_iqn'],
+ iscsi_properties['target_portal'],
+ iscsi_command))
+ (out, err) = self._execute(command)
+ LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
+ (iscsi_command, out, err))
+ return (out, err)
+
+ def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
+ iscsi_command = ("--op update -n %s -v %s" %
+ (property_key, property_value))
+ return self._run_iscsiadm(iscsi_properties, iscsi_command)
def discover_volume(self, volume):
"""Discover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
- self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
- "-n node.startup -v automatic" %
- (iscsi_name, iscsi_portal))
- return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal,
- iscsi_name)
+ iscsi_properties = self._get_iscsi_properties(volume)
+
+ if not iscsi_properties['target_discovered']:
+ self._run_iscsiadm(iscsi_properties, "--op new")
+
+ if iscsi_properties.get('auth_method'):
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.authmethod",
+ iscsi_properties['auth_method'])
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.username",
+ iscsi_properties['auth_username'])
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.password",
+ iscsi_properties['auth_password'])
+
+ self._run_iscsiadm(iscsi_properties, "--login")
+
+ self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
+
+ mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" %
+ (iscsi_properties['target_portal'],
+ iscsi_properties['target_iqn']))
+
+ # The /dev/disk/by-path/... node is not always present immediately
+ # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
+ tries = 0
+ while not os.path.exists(mount_device):
+ if tries >= FLAGS.num_iscsi_scan_tries:
+ raise exception.Error(_("iSCSI device not found at %s") %
+ (mount_device))
+
+ LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
+ "Will rescan & retry. Try number: %(tries)s") %
+ locals())
+
+ # The rescan isn't documented as being necessary(?), but it helps
+ self._run_iscsiadm(iscsi_properties, "--rescan")
+
+ tries = tries + 1
+ if not os.path.exists(mount_device):
+ time.sleep(tries ** 2)
+
+ if tries != 0:
+ LOG.debug(_("Found iSCSI node %(mount_device)s "
+ "(after %(tries)s rescans)") %
+ locals())
+
+ return mount_device
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
- self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
- "-n node.startup -v manual" %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node -T %s -p %s --logout " %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node --op delete "
- "--targetname %s" % iscsi_name)
+ iscsi_properties = self._get_iscsi_properties(volume)
+ self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
+ self._run_iscsiadm(iscsi_properties, "--logout")
+ self._run_iscsiadm(iscsi_properties, "--op delete")
class FakeISCSIDriver(ISCSIDriver):
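The retry-with-delay loop above is flagged as a candidate for a shared utility; a minimal sketch of what such a helper might look like (the name poll_until and its placement are hypothetical, not part of this change):

    import time

    def poll_until(check, on_retry, max_tries):
        """Re-run check() until it passes, calling on_retry() between tries
        and sleeping with a quadratic back-off; returns False on give-up."""
        tries = 0
        while not check():
            if tries >= max_tries:
                return False
            on_retry()
            tries += 1
            if not check():
                time.sleep(tries ** 2)
        return True

    # discover_volume could then use something like:
    #   poll_until(lambda: os.path.exists(mount_device),
    #              lambda: self._run_iscsiadm(iscsi_properties, "--rescan"),
    #              FLAGS.num_iscsi_scan_tries)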
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index d2f02e4e0..3e8bc16b3 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -107,10 +107,14 @@ class VolumeManager(manager.Manager):
vol_size = volume_ref['size']
LOG.debug(_("volume %(vol_name)s: creating lv of"
" size %(vol_size)sG") % locals())
- self.driver.create_volume(volume_ref)
+ model_update = self.driver.create_volume(volume_ref)
+ if model_update:
+ self.db.volume_update(context, volume_ref['id'], model_update)
LOG.debug(_("volume %s: creating export"), volume_ref['name'])
- self.driver.create_export(context, volume_ref)
+ model_update = self.driver.create_export(context, volume_ref)
+ if model_update:
+ self.db.volume_update(context, volume_ref['id'], model_update)
except Exception:
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
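In other words, a driver can now hand fields back to the manager for persistence. A minimal sketch of the contract (the driver class and all values here are hypothetical; only the shape of the returned dict matters):

    from nova.volume.driver import ISCSIDriver

    class MyISCSIDriver(ISCSIDriver):
        def create_export(self, context, volume):
            # ... create the target, then return fields for the manager to
            # persist via self.db.volume_update()
            return {'provider_location': ('10.0.2.15:3260,1 '
                                          'iqn.2010-10.org.openstack:%s'
                                          % volume['name']),
                    'provider_auth': 'CHAP chapuser chapsecret'}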
diff --git a/nova/volume/san.py b/nova/volume/san.py
index 26d6125e7..9532c8116 100644
--- a/nova/volume/san.py
+++ b/nova/volume/san.py
@@ -16,13 +16,16 @@
# under the License.
"""
Drivers for san-stored volumes.
+
The unique thing about a SAN is that we don't expect that we can run the volume
- controller on the SAN hardware. We expect to access it over SSH or some API.
+controller on the SAN hardware. We expect to access it over SSH or some API.
"""
import os
import paramiko
+from xml.etree import ElementTree
+
from nova import exception
from nova import flags
from nova import log as logging
@@ -41,37 +44,19 @@ flags.DEFINE_string('san_password', '',
'Password for SAN controller')
flags.DEFINE_string('san_privatekey', '',
'Filename of private key to use for SSH authentication')
+flags.DEFINE_string('san_clustername', '',
+ 'Cluster name to use for creating volumes')
+flags.DEFINE_integer('san_ssh_port', 22,
+ 'SSH port to use with SAN')
class SanISCSIDriver(ISCSIDriver):
""" Base class for SAN-style storage volumes
- (storage providers we access over SSH)"""
- #Override because SAN ip != host ip
- def _get_name_and_portal(self, volume):
- """Gets iscsi name and portal from volume name and host."""
- volume_name = volume['name']
-
- # TODO(justinsb): store in volume, remerge with generic iSCSI code
- host = FLAGS.san_ip
-
- (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" % host)
-
- location = None
- find_iscsi_name = self._build_iscsi_target_name(volume)
- for target in out.splitlines():
- if find_iscsi_name in target:
- (location, _sep, iscsi_name) = target.partition(" ")
- break
- if not location:
- raise exception.Error(_("Could not find iSCSI export "
- " for volume %s") %
- volume_name)
-
- iscsi_portal = location.split(",")[0]
- LOG.debug("iscsi_name=%s, iscsi_portal=%s" %
- (iscsi_name, iscsi_portal))
- return (iscsi_name, iscsi_portal)
+
+    A SAN-style storage volume is 'different' because the volume controller
+    probably won't run on it, so we need to access it over SSH or another
+ remote protocol.
+ """
def _build_iscsi_target_name(self, volume):
return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
@@ -85,6 +70,7 @@ class SanISCSIDriver(ISCSIDriver):
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if FLAGS.san_password:
ssh.connect(FLAGS.san_ip,
+ port=FLAGS.san_ssh_port,
username=FLAGS.san_login,
password=FLAGS.san_password)
elif FLAGS.san_privatekey:
@@ -92,10 +78,11 @@ class SanISCSIDriver(ISCSIDriver):
# It sucks that paramiko doesn't support DSA keys
privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
ssh.connect(FLAGS.san_ip,
+ port=FLAGS.san_ssh_port,
username=FLAGS.san_login,
pkey=privatekey)
else:
- raise exception.Error("Specify san_password or san_privatekey")
+ raise exception.Error(_("Specify san_password or san_privatekey"))
return ssh
def _run_ssh(self, command, check_exit_code=True):
@@ -124,10 +111,10 @@ class SanISCSIDriver(ISCSIDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
if not (FLAGS.san_password or FLAGS.san_privatekey):
- raise exception.Error("Specify san_password or san_privatekey")
+ raise exception.Error(_("Specify san_password or san_privatekey"))
if not (FLAGS.san_ip):
- raise exception.Error("san_ip must be set")
+ raise exception.Error(_("san_ip must be set"))
def _collect_lines(data):
@@ -155,17 +142,27 @@ def _get_prefixed_values(data, prefix):
class SolarisISCSIDriver(SanISCSIDriver):
"""Executes commands relating to Solaris-hosted ISCSI volumes.
+
Basic setup for a Solaris iSCSI server:
+
pkg install storage-server SUNWiscsit
+
svcadm enable stmf
+
svcadm enable -r svc:/network/iscsi/target:default
+
pfexec itadm create-tpg e1000g0 ${MYIP}
+
pfexec itadm create-target -t e1000g0
+
Then grant the user that will be logging on lots of permissions.
I'm not sure exactly which though:
+
zfs allow justinsb create,mount,destroy rpool
+
usermod -P'File System Management' justinsb
+
usermod -P'Primary Administrator' justinsb
Also make sure you can login using san_login & san_password/san_privatekey
@@ -306,6 +303,17 @@ class SolarisISCSIDriver(SanISCSIDriver):
self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
(target_group_name, luid))
+ #TODO(justinsb): Is this always 1? Does it matter?
+ iscsi_portal_interface = '1'
+ iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+
+ db_update = {}
+ db_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ iscsi_name))
+
+ return db_update
+
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
@@ -333,3 +341,245 @@ class SolarisISCSIDriver(SanISCSIDriver):
if self._is_lu_created(volume):
self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
(luid))
+
+
+class HpSanISCSIDriver(SanISCSIDriver):
+ """Executes commands relating to HP/Lefthand SAN ISCSI volumes.
+
+ We use the CLIQ interface, over SSH.
+
+ Rough overview of CLIQ commands used:
+
+ :createVolume: (creates the volume)
+
+ :getVolumeInfo: (to discover the IQN etc)
+
+ :getClusterInfo: (to discover the iSCSI target IP address)
+
+ :assignVolumeChap: (exports it with CHAP security)
+
+    The 'trick' here is that the HP SAN enforces security by default, so
+    normally a volume mount would require both configuring the SAN in the
+    volume layer and doing the mount in the compute layer.  Multi-layer
+    operations are not catered for at the moment in the nova architecture,
+    so instead we share the volume using CHAP at volume creation time.  The
+    mount then only needs those CHAP credentials, so it can take place
+    exclusively in the compute layer.
+ """
+
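+    # For illustration (hypothetical volume name):
+    #   self._cliq_run_xml("getVolumeInfo", {'volumeName': 'vol-b'})
+    # is sent over SSH roughly as the single CLIQ command
+    #   'getVolumeInfo volumeName=vol-b output=XML'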
+ def _cliq_run(self, verb, cliq_args):
+ """Runs a CLIQ command over SSH, without doing any result parsing"""
+ cliq_arg_strings = []
+ for k, v in cliq_args.items():
+ cliq_arg_strings.append(" %s=%s" % (k, v))
+ cmd = verb + ''.join(cliq_arg_strings)
+
+ return self._run_ssh(cmd)
+
+ def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
+ """Runs a CLIQ command over SSH, parsing and checking the output"""
+ cliq_args['output'] = 'XML'
+ (out, _err) = self._cliq_run(verb, cliq_args)
+
+ LOG.debug(_("CLIQ command returned %s"), out)
+
+ result_xml = ElementTree.fromstring(out)
+ if check_cliq_result:
+ response_node = result_xml.find("response")
+ if response_node is None:
+ msg = (_("Malformed response to CLIQ command "
+ "%(verb)s %(cliq_args)s. Result=%(out)s") %
+ locals())
+ raise exception.Error(msg)
+
+ result_code = response_node.attrib.get("result")
+
+ if result_code != "0":
+ msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
+                     "Result=%(out)s") %
+ locals())
+ raise exception.Error(msg)
+
+ return result_xml
+
+ def _cliq_get_cluster_info(self, cluster_name):
+ """Queries for info about the cluster (including IP)"""
+ cliq_args = {}
+ cliq_args['clusterName'] = cluster_name
+ cliq_args['searchDepth'] = '1'
+ cliq_args['verbose'] = '0'
+
+ result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)
+
+ return result_xml
+
+ def _cliq_get_cluster_vip(self, cluster_name):
+ """Gets the IP on which a cluster shares iSCSI volumes"""
+ cluster_xml = self._cliq_get_cluster_info(cluster_name)
+
+ vips = []
+ for vip in cluster_xml.findall("response/cluster/vip"):
+ vips.append(vip.attrib.get('ipAddress'))
+
+ if len(vips) == 1:
+ return vips[0]
+
+ _xml = ElementTree.tostring(cluster_xml)
+ msg = (_("Unexpected number of virtual ips for cluster "
+ " %(cluster_name)s. Result=%(_xml)s") %
+ locals())
+ raise exception.Error(msg)
+
+ def _cliq_get_volume_info(self, volume_name):
+ """Gets the volume info, including IQN"""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume_name
+ result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)
+
+ # Result looks like this:
+ #<gauche version="1.0">
+ # <response description="Operation succeeded." name="CliqSuccess"
+ # processingTime="87" result="0">
+ # <volume autogrowPages="4" availability="online" blockSize="1024"
+ # bytesWritten="0" checkSum="false" clusterName="Cluster01"
+ # created="2011-02-08T19:56:53Z" deleting="false" description=""
+ # groupName="Group01" initialQuota="536870912" isPrimary="true"
+ # iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b"
+ # maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
+ # minReplication="1" name="vol-b" parity="0" replication="2"
+ # reserveQuota="536870912" scratchQuota="4194304"
+ # serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316"
+ # size="1073741824" stridePages="32" thinProvision="true">
+ # <status description="OK" value="2"/>
+ # <permission access="rw"
+ # authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
+ # chapName="chapusername" chapRequired="true" id="25369"
+ # initiatorSecret="" iqn="" iscsiEnabled="true"
+ # loadBalance="true" targetSecret="supersecret"/>
+ # </volume>
+ # </response>
+ #</gauche>
+
+ # Flatten the nodes into a dictionary; use prefixes to avoid collisions
+ volume_attributes = {}
+
+ volume_node = result_xml.find("response/volume")
+ for k, v in volume_node.attrib.items():
+ volume_attributes["volume." + k] = v
+
+ status_node = volume_node.find("status")
+        if status_node is not None:
+ for k, v in status_node.attrib.items():
+ volume_attributes["status." + k] = v
+
+ # We only consider the first permission node
+ permission_node = volume_node.find("permission")
+        if permission_node is not None:
+            for k, v in permission_node.attrib.items():
+ volume_attributes["permission." + k] = v
+
+ LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") %
+ locals())
+ return volume_attributes
+
+ def create_volume(self, volume):
+ """Creates a volume."""
+ cliq_args = {}
+ cliq_args['clusterName'] = FLAGS.san_clustername
+ #TODO(justinsb): Should we default to inheriting thinProvision?
+ cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0'
+ cliq_args['volumeName'] = volume['name']
+ if int(volume['size']) == 0:
+ cliq_args['size'] = '100MB'
+ else:
+ cliq_args['size'] = '%sGB' % volume['size']
+
+ self._cliq_run_xml("createVolume", cliq_args)
+
+ volume_info = self._cliq_get_volume_info(volume['name'])
+ cluster_name = volume_info['volume.clusterName']
+ iscsi_iqn = volume_info['volume.iscsiIqn']
+
+ #TODO(justinsb): Is this always 1? Does it matter?
+ cluster_interface = '1'
+
+ cluster_vip = self._cliq_get_cluster_vip(cluster_name)
+ iscsi_portal = cluster_vip + ":3260," + cluster_interface
+
+ model_update = {}
+ model_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ iscsi_iqn))
+
+ return model_update
+
+ def delete_volume(self, volume):
+ """Deletes a volume."""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+ cliq_args['prompt'] = 'false' # Don't confirm
+
+ self._cliq_run_xml("deleteVolume", cliq_args)
+
+ def local_path(self, volume):
+ # TODO(justinsb): Is this needed here?
+ raise exception.Error(_("local_path not supported"))
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ return self._do_export(context, volume, force_create=False)
+
+ def create_export(self, context, volume):
+ return self._do_export(context, volume, force_create=True)
+
+ def _do_export(self, context, volume, force_create):
+ """Supports ensure_export and create_export"""
+ volume_info = self._cliq_get_volume_info(volume['name'])
+
+ is_shared = 'permission.authGroup' in volume_info
+
+ model_update = {}
+
+ should_export = False
+
+ if force_create or not is_shared:
+ should_export = True
+ # Check that we have a project_id
+ project_id = volume['project_id']
+ if not project_id:
+ project_id = context.project_id
+
+ if project_id:
+ #TODO(justinsb): Use a real per-project password here
+ chap_username = 'proj_' + project_id
+ # HP/Lefthand requires that the password be >= 12 characters
+ chap_password = 'project_secret_' + project_id
+ else:
+ msg = (_("Could not determine project for volume %s, "
+ "can't export") %
+ (volume['name']))
+ if force_create:
+ raise exception.Error(msg)
+ else:
+ LOG.warn(msg)
+ should_export = False
+
+ if should_export:
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+ cliq_args['chapName'] = chap_username
+ cliq_args['targetSecret'] = chap_password
+
+ self._cliq_run_xml("assignVolumeChap", cliq_args)
+
+ model_update['provider_auth'] = ("CHAP %s %s" %
+ (chap_username, chap_password))
+
+ return model_update
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+
+ self._cliq_run_xml("unassignVolume", cliq_args)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index e01cc1e1e..1eb66d067 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -59,7 +59,6 @@ class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, threads=1000):
- logging.basicConfig()
self.pool = eventlet.GreenPool(threads)
def start(self, application, port, host='0.0.0.0', backlog=128):
@@ -515,10 +514,3 @@ def load_paste_app(filename, appname):
except LookupError:
pass
return app
-
-
-def paste_config_to_flags(config, mixins):
- for k, v in mixins.iteritems():
- value = config.get(k, v)
- converted_value = FLAGS[k].parser.Parse(value)
- setattr(FLAGS, k, converted_value)