summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorRyan Lane <laner@controller>2010-11-24 15:46:22 +0000
committerRyan Lane <laner@controller>2010-11-24 15:46:22 +0000
commit7d462fd04cf799689bcbea0b926f0bd38e64869c (patch)
tree1e032a18194694783dfe5475a782a56a75c746d0 /nova
parentebffd9cb35af4257a085f85abf64e0c2baf31ff0 (diff)
parent6e349f6ea1846c104f620aa68a26cfc753e8977d (diff)
downloadnova-7d462fd04cf799689bcbea0b926f0bd38e64869c.tar.gz
nova-7d462fd04cf799689bcbea0b926f0bd38e64869c.tar.xz
nova-7d462fd04cf799689bcbea0b926f0bd38e64869c.zip
Merge from main branch
Diffstat (limited to 'nova')
-rw-r--r--nova/adminclient.py92
-rw-r--r--nova/api/__init__.py42
-rw-r--r--nova/api/cloud.py20
-rw-r--r--nova/api/ec2/__init__.py9
-rw-r--r--nova/api/ec2/cloud.py99
-rw-r--r--nova/api/ec2/images.py123
-rw-r--r--nova/api/openstack/images.py13
-rw-r--r--nova/api/openstack/servers.py25
-rw-r--r--nova/auth/fakeldap.py11
-rw-r--r--nova/auth/manager.py3
-rw-r--r--nova/compute/disk.py38
-rw-r--r--nova/compute/manager.py152
-rw-r--r--nova/compute/monitor.py4
-rw-r--r--nova/crypto.py15
-rw-r--r--nova/db/api.py181
-rw-r--r--nova/db/sqlalchemy/api.py156
-rw-r--r--nova/db/sqlalchemy/models.py113
-rw-r--r--nova/flags.py10
-rw-r--r--nova/image/glance.py (renamed from nova/image/services/glance/__init__.py)24
-rw-r--r--nova/image/local.py88
-rw-r--r--nova/image/s3.py109
-rw-r--r--nova/image/service.py131
-rw-r--r--nova/image/services/__init__.py0
-rw-r--r--nova/manager.py34
-rw-r--r--nova/network/linux_net.py2
-rw-r--r--nova/network/manager.py118
-rw-r--r--nova/objectstore/bucket.py4
-rw-r--r--nova/objectstore/handler.py4
-rw-r--r--nova/objectstore/image.py4
-rw-r--r--nova/rpc.py34
-rw-r--r--nova/server.py4
-rw-r--r--nova/service.py7
-rw-r--r--nova/test.py156
-rw-r--r--nova/tests/api/__init__.py2
-rw-r--r--nova/tests/api/openstack/fakes.py25
-rw-r--r--nova/tests/api/openstack/test_api.py6
-rw-r--r--nova/tests/api/openstack/test_auth.py18
-rw-r--r--nova/tests/api/openstack/test_flavors.py2
-rw-r--r--nova/tests/api/openstack/test_images.py45
-rw-r--r--nova/tests/api/openstack/test_servers.py32
-rw-r--r--nova/tests/api_unittest.py10
-rw-r--r--nova/tests/cloud_unittest.py35
-rw-r--r--nova/tests/compute_unittest.py21
-rw-r--r--nova/tests/fake_flags.py9
-rw-r--r--nova/tests/misc_unittest.py48
-rw-r--r--nova/tests/network_unittest.py2
-rw-r--r--nova/tests/quota_unittest.py8
-rw-r--r--nova/tests/scheduler_unittest.py2
-rw-r--r--nova/tests/service_unittest.py125
-rw-r--r--nova/tests/virt_unittest.py4
-rw-r--r--nova/tests/volume_unittest.py41
-rw-r--r--nova/twistd.py4
-rw-r--r--nova/utils.py38
-rw-r--r--nova/virt/connection.py25
-rw-r--r--nova/virt/fake.py27
-rw-r--r--nova/virt/libvirt.rescue.qemu.xml.template37
-rw-r--r--nova/virt/libvirt.rescue.uml.xml.template26
-rw-r--r--nova/virt/libvirt.rescue.xen.xml.template34
-rw-r--r--nova/virt/libvirt_conn.py183
-rw-r--r--nova/virt/xenapi.py46
-rw-r--r--nova/volume/driver.py279
-rw-r--r--nova/volume/manager.py138
-rw-r--r--nova/wsgi.py27
63 files changed, 2028 insertions, 1096 deletions
diff --git a/nova/adminclient.py b/nova/adminclient.py
index b7a3d2c32..5a62cce7d 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -22,25 +22,28 @@ Nova User API client library.
import base64
import boto
import httplib
+
+from nova import flags
from boto.ec2.regioninfo import RegionInfo
+FLAGS = flags.FLAGS
+
DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
DEFAULT_REGION = 'nova'
-DEFAULT_ACCESS_KEY = 'admin'
-DEFAULT_SECRET_KEY = 'admin'
class UserInfo(object):
"""
- Information about a Nova user, as parsed through SAX
- fields include:
- username
- accesskey
- secretkey
-
- and an optional field containing a zip with X509 cert & rc
- file
+ Information about a Nova user, as parsed through SAX.
+
+ **Fields Include**
+
+ * username
+ * accesskey
+ * secretkey
+ * file (optional) containing zip of X509 cert & rc file
+
"""
def __init__(self, connection=None, username=None, endpoint=None):
@@ -68,9 +71,13 @@ class UserInfo(object):
class UserRole(object):
"""
Information about a Nova user's role, as parsed through SAX.
- Fields include:
- role
+
+ **Fields include**
+
+ * role
+
"""
+
def __init__(self, connection=None):
self.connection = connection
self.role = None
@@ -90,12 +97,15 @@ class UserRole(object):
class ProjectInfo(object):
"""
- Information about a Nova project, as parsed through SAX
- Fields include:
- projectname
- description
- projectManagerId
- memberIds
+ Information about a Nova project, as parsed through SAX.
+
+ **Fields include**
+
+ * projectname
+ * description
+ * projectManagerId
+ * memberIds
+
"""
def __init__(self, connection=None):
@@ -127,8 +137,11 @@ class ProjectInfo(object):
class ProjectMember(object):
"""
Information about a Nova project member, as parsed through SAX.
- Fields include:
- memberId
+
+ **Fields include**
+
+ * memberId
+
"""
def __init__(self, connection=None):
@@ -150,14 +163,18 @@ class ProjectMember(object):
class HostInfo(object):
"""
- Information about a Nova Host, as parsed through SAX:
- Disk stats
- Running Instances
- Memory stats
- CPU stats
- Network address info
- Firewall info
- Bridge and devices
+ Information about a Nova Host, as parsed through SAX.
+
+ **Fields Include**
+
+ * Disk stats
+ * Running Instances
+ * Memory stats
+ * CPU stats
+ * Network address info
+ * Firewall info
+ * Bridge and devices
+
"""
def __init__(self, connection=None):
@@ -177,9 +194,13 @@ class HostInfo(object):
class NovaAdminClient(object):
- def __init__(self, clc_url=DEFAULT_CLC_URL, region=DEFAULT_REGION,
- access_key=DEFAULT_ACCESS_KEY, secret_key=DEFAULT_SECRET_KEY,
- **kwargs):
+ def __init__(
+ self,
+ clc_url=DEFAULT_CLC_URL,
+ region=DEFAULT_REGION,
+ access_key=FLAGS.aws_access_key_id,
+ secret_key=FLAGS.aws_secret_access_key,
+ **kwargs):
parts = self.split_clc_url(clc_url)
self.clc_url = clc_url
@@ -257,9 +278,12 @@ class NovaAdminClient(object):
[('item', UserRole)])
def get_user_roles(self, user, project=None):
- """Returns a list of roles for the given user. Omitting project will
- return any global roles that the user has. Specifying project will
- return only project specific roles."""
+ """Returns a list of roles for the given user.
+
+ Omitting project will return any global roles that the user has.
+ Specifying project will return only project specific roles.
+
+ """
params = {'User': user}
if project:
params['Project'] = project
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
index 8a1d9fe32..80f9f2109 100644
--- a/nova/api/__init__.py
+++ b/nova/api/__init__.py
@@ -15,15 +15,21 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
Root WSGI middleware for all API controllers.
+
+**Related Flags**
+
+:osapi_subdomain: subdomain running the OpenStack API (default: api)
+:ec2api_subdomain: subdomain running the EC2 API (default: ec2)
+
"""
import routes
import webob.dec
from nova import flags
+from nova import utils
from nova import wsgi
from nova.api import cloudpipe
from nova.api import ec2
@@ -35,37 +41,31 @@ flags.DEFINE_string('osapi_subdomain', 'api',
'subdomain running the OpenStack API')
flags.DEFINE_string('ec2api_subdomain', 'ec2',
'subdomain running the EC2 API')
-flags.DEFINE_string('FAKE_subdomain', None,
- 'set to api or ec2 to fake the subdomain of the host '
- 'for testing')
FLAGS = flags.FLAGS
class API(wsgi.Router):
"""Routes top-level requests to the appropriate controller."""
- def __init__(self):
- osapidomain = {'sub_domain': [FLAGS.osapi_subdomain]}
- ec2domain = {'sub_domain': [FLAGS.ec2api_subdomain]}
- # If someone wants to pretend they're hitting the OSAPI subdomain
- # on their local box, they can set FAKE_subdomain to 'api', which
- # removes subdomain restrictions from the OpenStack API routes below.
- if FLAGS.FAKE_subdomain == 'api':
- osapidomain = {}
- elif FLAGS.FAKE_subdomain == 'ec2':
- ec2domain = {}
+ def __init__(self, default_api):
+ osapi_subdomain = {'sub_domain': [FLAGS.osapi_subdomain]}
+ ec2api_subdomain = {'sub_domain': [FLAGS.ec2api_subdomain]}
+ if default_api == 'os':
+ osapi_subdomain = {}
+ elif default_api == 'ec2':
+ ec2api_subdomain = {}
mapper = routes.Mapper()
mapper.sub_domains = True
+
mapper.connect("/", controller=self.osapi_versions,
- conditions=osapidomain)
+ conditions=osapi_subdomain)
mapper.connect("/v1.0/{path_info:.*}", controller=openstack.API(),
- conditions=osapidomain)
+ conditions=osapi_subdomain)
mapper.connect("/", controller=self.ec2api_versions,
- conditions=ec2domain)
+ conditions=ec2api_subdomain)
mapper.connect("/services/{path_info:.*}", controller=ec2.API(),
- conditions=ec2domain)
- mapper.connect("/cloudpipe/{path_info:.*}", controller=cloudpipe.API())
+ conditions=ec2api_subdomain)
mrh = metadatarequesthandler.MetadataRequestHandler()
for s in ['/latest',
'/2009-04-04',
@@ -78,7 +78,9 @@ class API(wsgi.Router):
'/2007-01-19',
'/1.0']:
mapper.connect('%s/{path_info:.*}' % s, controller=mrh,
- conditions=ec2domain)
+ conditions=ec2api_subdomain)
+
+ mapper.connect("/cloudpipe/{path_info:.*}", controller=cloudpipe.API())
super(API, self).__init__(mapper)
@webob.dec.wsgify
diff --git a/nova/api/cloud.py b/nova/api/cloud.py
index aa84075dc..b8f15019f 100644
--- a/nova/api/cloud.py
+++ b/nova/api/cloud.py
@@ -36,3 +36,23 @@ def reboot(instance_id, context=None):
db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
"args": {"instance_id": instance_ref['id']}})
+
+
+def rescue(instance_id, context):
+ """Rescue the given instance."""
+ instance_ref = db.instance_get_by_internal_id(context, instance_id)
+ host = instance_ref['host']
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "rescue_instance",
+ "args": {"instance_id": instance_ref['id']}})
+
+
+def unrescue(instance_id, context):
+ """Unrescue the given instance."""
+ instance_ref = db.instance_get_by_internal_id(context, instance_id)
+ host = instance_ref['host']
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "unrescue_instance",
+ "args": {"instance_id": instance_ref['id']}})
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 0df4d3710..a6ee16c33 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -15,8 +15,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+"""
+Starting point for routing EC2 requests.
-"""Starting point for routing EC2 requests"""
+"""
import logging
import routes
@@ -238,11 +240,12 @@ class Executor(wsgi.Application):
return self._error(req, type(ex).__name__, str(ex))
def _error(self, req, code, message):
+ logging.error("%s: %s", code, message)
resp = webob.Response()
resp.status = 400
resp.headers['Content-Type'] = 'text/xml'
- resp.body = ('<?xml version="1.0"?>\n'
+ resp.body = str('<?xml version="1.0"?>\n'
'<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
- '<RequestID>?</RequestID></Response>') % (code, message)
+ '<RequestID>?</RequestID></Response>' % (code, message))
return resp
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 07229dd73..9327bf0d4 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -41,7 +41,7 @@ from nova import rpc
from nova import utils
from nova.compute.instance_types import INSTANCE_TYPES
from nova.api import cloud
-from nova.api.ec2 import images
+from nova.image.s3 import S3ImageService
FLAGS = flags.FLAGS
@@ -99,6 +99,8 @@ class CloudController(object):
"""
def __init__(self):
self.network_manager = utils.import_object(FLAGS.network_manager)
+ self.compute_manager = utils.import_object(FLAGS.compute_manager)
+ self.image_service = S3ImageService()
self.setup()
def __str__(self):
@@ -464,24 +466,31 @@ class CloudController(object):
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
+ instance_ec2_id = None
+ instance_data = None
+ if volume.get('instance', None):
+ internal_id = volume['instance']['internal_id']
+ instance_ec2_id = internal_id_to_ec2_id(internal_id)
+ instance_data = '%s[%s]' % (instance_ec2_id,
+ volume['instance']['host'])
v = {}
v['volumeId'] = volume['ec2_id']
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['created_at']
- if context.user.is_admin():
+ if context.is_admin:
v['status'] = '%s (%s, %s, %s, %s)' % (
volume['status'],
volume['user_id'],
volume['host'],
- volume['instance_id'],
+ instance_data,
volume['mountpoint'])
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': False,
'device': volume['mountpoint'],
- 'instanceId': volume['instance_id'],
+ 'instanceId': instance_ec2_id,
'status': 'attached',
'volume_id': volume['ec2_id']}]
else:
@@ -516,7 +525,10 @@ class CloudController(object):
"args": {"topic": FLAGS.volume_topic,
"volume_id": volume_ref['id']}})
- return {'volumeSet': [self._format_volume(context, volume_ref)]}
+ # TODO(vish): Instance should be None at db layer instead of
+ # trying to lazy load, but for now we turn it into
+ # a dict to avoid an error.
+ return {'volumeSet': [self._format_volume(context, dict(volume_ref))]}
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
@@ -668,7 +680,7 @@ class CloudController(object):
context.project_id)
for floating_ip_ref in iterator:
address = floating_ip_ref['address']
- instance_id = None
+ ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
@@ -706,8 +718,8 @@ class CloudController(object):
"args": {"floating_address": floating_ip_ref['address']}})
return {'releaseResponse': ["Address released."]}
- def associate_address(self, context, ec2_id, public_ip, **kwargs):
- internal_id = ec2_id_to_internal_id(ec2_id)
+ def associate_address(self, context, instance_id, public_ip, **kwargs):
+ internal_id = ec2_id_to_internal_id(instance_id)
instance_ref = db.instance_get_by_internal_id(context, internal_id)
fixed_address = db.instance_get_fixed_address(context,
instance_ref['id'])
@@ -774,7 +786,7 @@ class CloudController(object):
vpn = kwargs['image_id'] == FLAGS.vpn_image_id
if not vpn:
- image = images.get(context, kwargs['image_id'])
+ image = self.image_service.show(context, kwargs['image_id'])
# FIXME(ja): if image is vpn, this breaks
# get defaults from imagestore
@@ -787,8 +799,8 @@ class CloudController(object):
ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
# make sure we have access to kernel and ramdisk
- images.get(context, kernel_id)
- images.get(context, ramdisk_id)
+ self.image_service.show(context, kernel_id)
+ self.image_service.show(context, ramdisk_id)
logging.debug("Going to run %s instances...", num_instances)
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
@@ -835,21 +847,21 @@ class CloudController(object):
elevated = context.elevated()
for num in range(num_instances):
- instance_ref = db.instance_create(context, base_options)
- inst_id = instance_ref['id']
- for security_group_id in security_groups:
- db.instance_add_security_group(elevated,
- inst_id,
- security_group_id)
+ instance_ref = self.compute_manager.create_instance(context,
+ security_groups,
+ mac_address=utils.generate_mac(),
+ launch_index=num,
+ **base_options)
+ inst_id = instance_ref['id']
- inst = {}
- inst['mac_address'] = utils.generate_mac()
- inst['launch_index'] = num
internal_id = instance_ref['internal_id']
ec2_id = internal_id_to_ec2_id(internal_id)
- inst['hostname'] = ec2_id
- db.instance_update(context, inst_id, inst)
+
+ self.compute_manager.update_instance(context,
+ inst_id,
+ hostname=ec2_id)
+
# TODO(vish): This probably should be done in the scheduler
# or in compute as a call. The network should be
# allocated after the host is assigned and setup
@@ -895,11 +907,12 @@ class CloudController(object):
id_str)
continue
now = datetime.datetime.utcnow()
- db.instance_update(context,
- instance_ref['id'],
- {'state_description': 'terminating',
- 'state': 0,
- 'terminated_at': now})
+ self.compute_manager.update_instance(context,
+ instance_ref['id'],
+ state_description='terminating',
+ state=0,
+ terminated_at=now)
+
# FIXME(ja): where should network deallocate occur?
address = db.instance_get_floating_address(context,
instance_ref['id'])
@@ -936,8 +949,21 @@ class CloudController(object):
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
- for id_str in instance_id:
- cloud.reboot(id_str, context=context)
+ for ec2_id in instance_id:
+ internal_id = ec2_id_to_internal_id(ec2_id)
+ cloud.reboot(internal_id, context=context)
+ return True
+
+ def rescue_instance(self, context, instance_id, **kwargs):
+ """This is an extension to the normal ec2_api"""
+ internal_id = ec2_id_to_internal_id(instance_id)
+ cloud.rescue(internal_id, context=context)
+ return True
+
+ def unrescue_instance(self, context, instance_id, **kwargs):
+ """This is an extension to the normal ec2_api"""
+ internal_id = ec2_id_to_internal_id(instance_id)
+ cloud.unrescue(internal_id, context=context)
return True
def update_instance(self, context, ec2_id, **kwargs):
@@ -968,20 +994,17 @@ class CloudController(object):
return True
def describe_images(self, context, image_id=None, **kwargs):
- # The objectstore does its own authorization for describe
- imageSet = images.list(context, image_id)
+ imageSet = self.image_service.index(context, image_id)
return {'imagesSet': imageSet}
def deregister_image(self, context, image_id, **kwargs):
- # FIXME: should the objectstore be doing these authorization checks?
- images.deregister(context, image_id)
+ self.image_service.deregister(context, image_id)
return {'imageId': image_id}
def register_image(self, context, image_location=None, **kwargs):
- # FIXME: should the objectstore be doing these authorization checks?
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
- image_id = images.register(context, image_location)
+ image_id = self.image_service.register(context, image_location)
logging.debug("Registered %s as %s" % (image_location, image_id))
return {'imageId': image_id}
@@ -989,7 +1012,7 @@ class CloudController(object):
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
try:
- image = images.list(context, image_id)[0]
+ image = self.image_service.show(context, image_id)
except IndexError:
raise exception.ApiError('invalid id: %s' % image_id)
result = {'image_id': image_id, 'launchPermission': []}
@@ -1008,8 +1031,8 @@ class CloudController(object):
raise exception.ApiError('only group "all" is supported')
if not operation_type in ['add', 'remove']:
raise exception.ApiError('operation_type must be add or remove')
- return images.modify(context, image_id, operation_type)
+ return self.image_service.modify(context, image_id, operation_type)
def update_image(self, context, image_id, **kwargs):
- result = images.update(context, image_id, dict(kwargs))
+ result = self.image_service.update(context, image_id, dict(kwargs))
return result
diff --git a/nova/api/ec2/images.py b/nova/api/ec2/images.py
deleted file mode 100644
index 60f9008e9..000000000
--- a/nova/api/ec2/images.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Proxy AMI-related calls from the cloud controller, to the running
-objectstore service.
-"""
-
-import json
-import urllib
-
-import boto.s3.connection
-
-from nova import exception
-from nova import flags
-from nova import utils
-from nova.auth import manager
-
-
-FLAGS = flags.FLAGS
-
-
-def modify(context, image_id, operation):
- conn(context).make_request(
- method='POST',
- bucket='_images',
- query_args=qs({'image_id': image_id, 'operation': operation}))
-
- return True
-
-
-def update(context, image_id, attributes):
- """update an image's attributes / info.json"""
- attributes.update({"image_id": image_id})
- conn(context).make_request(
- method='POST',
- bucket='_images',
- query_args=qs(attributes))
- return True
-
-
-def register(context, image_location):
- """ rpc call to register a new image based from a manifest """
-
- image_id = utils.generate_uid('ami')
- conn(context).make_request(
- method='PUT',
- bucket='_images',
- query_args=qs({'image_location': image_location,
- 'image_id': image_id}))
-
- return image_id
-
-
-def list(context, filter_list=[]):
- """ return a list of all images that a user can see
-
- optionally filtered by a list of image_id """
-
- if FLAGS.connection_type == 'fake':
- return [{'imageId': 'bar'}]
-
- # FIXME: send along the list of only_images to check for
- response = conn(context).make_request(
- method='GET',
- bucket='_images')
-
- result = json.loads(response.read())
- if not filter_list is None:
- return [i for i in result if i['imageId'] in filter_list]
- return result
-
-
-def get(context, image_id):
- """return a image object if the context has permissions"""
- result = list(context, [image_id])
- if not result:
- raise exception.NotFound('Image %s could not be found' % image_id)
- image = result[0]
- return image
-
-
-def deregister(context, image_id):
- """ unregister an image """
- conn(context).make_request(
- method='DELETE',
- bucket='_images',
- query_args=qs({'image_id': image_id}))
-
-
-def conn(context):
- access = manager.AuthManager().get_access_key(context.user,
- context.project)
- secret = str(context.user.secret)
- calling = boto.s3.connection.OrdinaryCallingFormat()
- return boto.s3.connection.S3Connection(aws_access_key_id=access,
- aws_secret_access_key=secret,
- is_secure=False,
- calling_format=calling,
- port=FLAGS.s3_port,
- host=FLAGS.s3_host)
-
-
-def qs(params):
- pairs = []
- for key in params.keys():
- pairs.append(key + '=' + urllib.quote(params[key]))
- return '&'.join(pairs)
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 5bc915e63..cdbdc9bdd 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -17,6 +17,7 @@
from webob import exc
+from nova import context
from nova import flags
from nova import utils
from nova import wsgi
@@ -46,19 +47,23 @@ class Controller(wsgi.Controller):
def detail(self, req):
"""Return all public images in detail."""
+ user_id = req.environ['nova.context']['user']['id']
+ ctxt = context.RequestContext(user_id, user_id)
try:
- images = self._service.detail()
+ images = self._service.detail(ctxt)
images = nova.api.openstack.limited(images, req)
except NotImplementedError:
# Emulate detail() using repeated calls to show()
- images = self._service.index()
+ images = self._service.index(ctxt)
images = nova.api.openstack.limited(images, req)
- images = [self._service.show(i['id']) for i in images]
+ images = [self._service.show(ctxt, i['id']) for i in images]
return dict(images=images)
def show(self, req, id):
"""Return data about the given image id."""
- return dict(image=self._service.show(id))
+ user_id = req.environ['nova.context']['user']['id']
+ ctxt = context.RequestContext(user_id, user_id)
+ return dict(image=self._service.show(ctxt, id))
def delete(self, req, id):
# Only public images are supported for now.
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index ef773c3be..1d8aa2fa4 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -95,6 +95,7 @@ class Controller(wsgi.Controller):
db_driver = FLAGS.db_driver
self.db_driver = utils.import_object(db_driver)
self.network_manager = utils.import_object(FLAGS.network_manager)
+ self.compute_manager = utils.import_object(FLAGS.compute_manager)
super(Controller, self).__init__()
def index(self, req):
@@ -242,34 +243,30 @@ class Controller(wsgi.Controller):
inst['memory_mb'] = flavor['memory_mb']
inst['vcpus'] = flavor['vcpus']
inst['local_gb'] = flavor['local_gb']
-
- ref = self.db_driver.instance_create(ctxt, inst)
- inst['id'] = ref.internal_id
-
inst['mac_address'] = utils.generate_mac()
-
- #TODO(dietz) is this necessary?
inst['launch_index'] = 0
- inst['hostname'] = str(ref.internal_id)
- self.db_driver.instance_update(ctxt, inst['id'], inst)
+ ref = self.compute_manager.create_instance(ctxt, **inst)
+ inst['id'] = ref['internal_id']
+
+ inst['hostname'] = str(ref['internal_id'])
+ self.compute_manager.update_instance(ctxt, inst['id'], **inst)
- network_manager = utils.import_object(FLAGS.network_manager)
- address = network_manager.allocate_fixed_ip(ctxt,
- inst['id'])
+ address = self.network_manager.allocate_fixed_ip(ctxt,
+ inst['id'])
# TODO(vish): This probably should be done in the scheduler
# network is setup when host is assigned
- network_topic = self._get_network_topic(ctxt, network_manager)
+ network_topic = self._get_network_topic(ctxt)
rpc.call(ctxt,
network_topic,
{"method": "setup_fixed_ip",
"args": {"address": address}})
return inst
- def _get_network_topic(self, context, network_manager):
+ def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
- network_ref = network_manager.get_network(context)
+ network_ref = self.network_manager.get_network(context)
host = network_ref['host']
if not host:
host = rpc.call(context,
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index cf3a84a5d..46e0135b4 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -15,12 +15,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Fake LDAP server for test harnesses.
+"""Fake LDAP server for test harness, backs to ReDIS.
This class does very little error checking, and knows nothing about ldap
-class definitions. It implements the minimum emulation of the python ldap
+class definitions. It implements the minimum emulation of the python ldap
library to work with nova.
+
"""
import json
@@ -77,9 +77,8 @@ def initialize(_uri):
def _match_query(query, attrs):
"""Match an ldap query to an attribute dictionary.
- &, |, and ! are supported in the query. No syntax checking is performed,
- so malformed querys will not work correctly.
-
+ The characters &, |, and ! are supported in the query. No syntax checking
+    is performed, so malformed queries will not work correctly.
"""
# cut off the parentheses
inner = query[1:-1]
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 001a96875..7b2b68161 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -84,12 +84,11 @@ class AuthBase(object):
@classmethod
def safe_id(cls, obj):
- """Safe get object id
+ """Safely get object id.
This method will return the id of the object if the object
is of this class, otherwise it will return the original object.
This allows methods to accept objects or ids as paramaters.
-
"""
if isinstance(obj, cls):
return obj.id
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index e362b4507..4338d39f0 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -15,10 +15,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
Utility methods to resize, repartition, and modify disk images.
+
Includes injection of SSH PGP keys into authorized_keys file.
+
"""
import logging
@@ -41,20 +42,23 @@ flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
@defer.inlineCallbacks
def partition(infile, outfile, local_bytes=0, resize=True,
local_type='ext2', execute=None):
- """Takes a single partition represented by infile and writes a bootable
- drive image into outfile.
+ """
+ Turns a partition (infile) into a bootable drive image (outfile).
The first 63 sectors (0-62) of the resulting image is a master boot record.
Infile becomes the first primary partition.
If local bytes is specified, a second primary partition is created and
formatted as ext2.
- In the diagram below, dashes represent drive sectors.
- +-----+------. . .-------+------. . .------+
- | 0 a| b c|d e|
- +-----+------. . .-------+------. . .------+
- | mbr | primary partiton | local partition |
- +-----+------. . .-------+------. . .------+
+ ::
+
+ In the diagram below, dashes represent drive sectors.
+ +-----+------. . .-------+------. . .------+
+ | 0 a| b c|d e|
+ +-----+------. . .-------+------. . .------+
+    | mbr | primary partition | local partition |
+ +-----+------. . .-------+------. . .------+
+
"""
sector_size = 512
file_size = os.path.getsize(infile)
@@ -161,6 +165,11 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
+ """Add the given public ssh key to root's authorized_keys.
+
+ key is an ssh key string.
+ fs is the path to the base of the filesystem into which to inject the key.
+ """
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
yield execute('sudo chown root %s' % sshdir)
@@ -171,6 +180,13 @@ def _inject_key_into_fs(key, fs, execute=None):
@defer.inlineCallbacks
def _inject_net_into_fs(net, fs, execute=None):
- netfile = os.path.join(os.path.join(os.path.join(
- fs, 'etc'), 'network'), 'interfaces')
+ """Inject /etc/network/interfaces into the filesystem rooted at fs.
+
+ net is the contents of /etc/network/interfaces.
+ """
+ netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
+ yield execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
+ yield execute('sudo chown root:root %s' % netdir)
+ yield execute('sudo chmod 755 %s' % netdir)
+ netfile = os.path.join(netdir, 'interfaces')
yield execute('sudo tee %s' % netfile, net)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 523bb8893..890d79fba 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -17,13 +17,25 @@
# under the License.
"""
-Handles all code relating to instances (guest vms)
+Handles all processes relating to instances (guest vms).
+
+The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
+handles RPC calls relating to creating instances. It is responsible for
+building a disk image, launching it via the underlying virtualization driver,
+responding to calls to check its state, attaching persistent storage, and
+termination.
+
+**Related Flags**
+
+:instances_path: Where instances are kept on disk
+:compute_driver: Name of class that is used to handle virtualization, loaded
+ by :func:`nova.utils.import_object`
+:volume_manager: Name of class that handles persistent storage, loaded by
+ :func:`nova.utils.import_object`
"""
-import base64
import datetime
import logging
-import os
from twisted.internet import defer
@@ -42,12 +54,12 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
class ComputeManager(manager.Manager):
- """
- Manages the running instances.
- """
+ """Manages the running instances from creation to destruction."""
+
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# TODO(vish): sync driver creation logic with the rest of the system
+ # and redocument the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
self.driver = utils.import_object(compute_driver)
@@ -56,17 +68,63 @@ class ComputeManager(manager.Manager):
super(ComputeManager, self).__init__(*args, **kwargs)
def _update_state(self, context, instance_id):
- """Update the state of an instance from the driver info"""
+ """Update the state of an instance from the driver info."""
# FIXME(ja): include other fields from state?
instance_ref = self.db.instance_get(context, instance_id)
- state = self.driver.get_info(instance_ref.name)['state']
+ try:
+ info = self.driver.get_info(instance_ref['name'])
+ state = info['state']
+ except exception.NotFound:
+ state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)
@defer.inlineCallbacks
@exception.wrap_exception
def refresh_security_group(self, context, security_group_id, **_kwargs):
+        """This call passes straight through to the virtualization driver."""
yield self.driver.refresh_security_group(security_group_id)
+ def create_instance(self, context, security_groups=None, **kwargs):
+ """Creates the instance in the datastore and returns the
+ new instance as a mapping
+
+ :param context: The security context
+ :param security_groups: list of security group ids to
+ attach to the instance
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ created
+
+ :retval Returns a mapping of the instance information
+ that has just been created
+
+ """
+ instance_ref = self.db.instance_create(context, kwargs)
+ inst_id = instance_ref['id']
+
+ elevated = context.elevated()
+ if not security_groups:
+ security_groups = []
+ for security_group_id in security_groups:
+ self.db.instance_add_security_group(elevated,
+ inst_id,
+ security_group_id)
+ return instance_ref
+
+ def update_instance(self, context, instance_id, **kwargs):
+ """Updates the instance in the datastore.
+
+ :param context: The security context
+ :param instance_id: ID of the instance to update
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
+
+ :retval None
+
+ """
+ self.db.instance_update(context, instance_id, kwargs)
+
@defer.inlineCallbacks
@exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
@@ -111,6 +169,9 @@ class ComputeManager(manager.Manager):
logging.debug("instance %s: terminating", instance_id)
instance_ref = self.db.instance_get(context, instance_id)
+ volumes = instance_ref.get('volumes', []) or []
+ for volume in volumes:
+ self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
raise exception.Error('trying to destroy already destroyed'
@@ -126,16 +187,15 @@ class ComputeManager(manager.Manager):
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this server."""
context = context.elevated()
- self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
+ self._update_state(context, instance_id)
if instance_ref['state'] != power_state.RUNNING:
- raise exception.Error(
- 'trying to reboot a non-running'
- 'instance: %s (state: %s excepted: %s)' %
- (instance_ref['internal_id'],
- instance_ref['state'],
- power_state.RUNNING))
+ logging.warn('trying to reboot a non-running '
+ 'instance: %s (state: %s excepted: %s)',
+ instance_ref['internal_id'],
+ instance_ref['state'],
+ power_state.RUNNING)
logging.debug('instance %s: rebooting', instance_ref['name'])
self.db.instance_set_state(context,
@@ -145,6 +205,38 @@ class ComputeManager(manager.Manager):
yield self.driver.reboot(instance_ref)
self._update_state(context, instance_id)
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def rescue_instance(self, context, instance_id):
+ """Rescue an instance on this server."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug('instance %s: rescuing',
+ instance_ref['internal_id'])
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'rescuing')
+ yield self.driver.rescue(instance_ref)
+ self._update_state(context, instance_id)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def unrescue_instance(self, context, instance_id):
+        """Unrescue an instance on this server."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug('instance %s: unrescuing',
+ instance_ref['internal_id'])
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'unrescuing')
+ yield self.driver.unrescue(instance_ref)
+ self._update_state(context, instance_id)
+
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
@@ -164,10 +256,23 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
dev_path = yield self.volume_manager.setup_compute_volume(context,
volume_id)
- yield self.driver.attach_volume(instance_ref['ec2_id'],
- dev_path,
- mountpoint)
- self.db.volume_attached(context, volume_id, instance_id, mountpoint)
+ try:
+ yield self.driver.attach_volume(instance_ref['name'],
+ dev_path,
+ mountpoint)
+ self.db.volume_attached(context,
+ volume_id,
+ instance_id,
+ mountpoint)
+ except Exception as exc: # pylint: disable-msg=W0702
+ # NOTE(vish): The inline callback eats the exception info so we
+ # log the traceback here and reraise the same
+            #             exception below.
+ logging.exception("instance %s: attach failed %s, removing",
+ instance_id, mountpoint)
+ yield self.volume_manager.remove_compute_volume(context,
+ volume_id)
+ raise exc
defer.returnValue(True)
@defer.inlineCallbacks
@@ -180,7 +285,12 @@ class ComputeManager(manager.Manager):
volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
- yield self.driver.detach_volume(instance_ref['ec2_id'],
- volume_ref['mountpoint'])
+ if instance_ref['name'] not in self.driver.list_instances():
+ logging.warn("Detaching volume from unknown instance %s",
+ instance_ref['name'])
+ else:
+ yield self.driver.detach_volume(instance_ref['name'],
+ volume_ref['mountpoint'])
+ yield self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
defer.returnValue(True)
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index d0154600f..ce45b14f6 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -211,8 +211,8 @@ def store_graph(instance_id, filename):
# the response we can make our own client that does the actual
# request and hands it off to the response parser.
s3 = boto.s3.connection.S3Connection(
- aws_access_key_id='admin',
- aws_secret_access_key='admin',
+ aws_access_key_id=FLAGS.aws_access_key_id,
+ aws_secret_access_key=FLAGS.aws_secret_access_key,
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
port=FLAGS.s3_port,
diff --git a/nova/crypto.py b/nova/crypto.py
index 16b4f5e1f..d73559587 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -15,10 +15,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
-Wrappers around standard crypto, including root and intermediate CAs,
-SSH key_pairs and x509 certificates.
+Wrappers around standard crypto data elements.
+
+Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
+
"""
import base64
@@ -227,12 +228,12 @@ def mkcacert(subject='nova', years=1):
def compute_md5(fp):
"""
- @type fp: file
- @param fp: File pointer to the file to MD5 hash. The file pointer will be
+ :type fp: file
+ :param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
- @rtype: tuple
- @return: the hex digest version of the MD5 hash
+ :rtype: tuple
+ :return: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
diff --git a/nova/db/api.py b/nova/db/api.py
index 0731e2e05..8f9dc2443 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -16,7 +16,17 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Defines interface for DB access
+Defines interface for DB access.
+
+The underlying driver is loaded as a :class:`LazyPluggable`.
+
+**Related Flags**
+
+:db_backend: string to lookup in the list of LazyPluggable backends.
+ `sqlalchemy` is the only supported backend right now.
+
+:sql_connection: string specifying the sqlalchemy connection to use, like:
+ `sqlite:///var/lib/nova/nova.sqlite`.
"""
from nova import exception
@@ -34,17 +44,22 @@ IMPL = utils.LazyPluggable(FLAGS['db_backend'],
class NoMoreAddresses(exception.Error):
- """No more available addresses"""
+ """No more available addresses."""
pass
class NoMoreBlades(exception.Error):
- """No more available blades"""
+ """No more available blades."""
pass
class NoMoreNetworks(exception.Error):
- """No more available networks"""
+ """No more available networks."""
+ pass
+
+
+class NoMoreTargets(exception.Error):
+    """No more available targets."""
pass
@@ -62,30 +77,33 @@ def service_get(context, service_id):
def service_get_all_by_topic(context, topic):
- """Get all compute services for a given topic """
+ """Get all compute services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_compute_sorted(context):
- """Get all compute services sorted by instance count
+ """Get all compute services sorted by instance count.
+
+ Returns a list of (Service, instance_count) tuples.
- Returns a list of (Service, instance_count) tuples
"""
return IMPL.service_get_all_compute_sorted(context)
def service_get_all_network_sorted(context):
- """Get all network services sorted by network count
+ """Get all network services sorted by network count.
+
+ Returns a list of (Service, network_count) tuples.
- Returns a list of (Service, network_count) tuples
"""
return IMPL.service_get_all_network_sorted(context)
def service_get_all_volume_sorted(context):
- """Get all volume services sorted by volume count
+ """Get all volume services sorted by volume count.
+
+ Returns a list of (Service, volume_count) tuples.
- Returns a list of (Service, volume_count) tuples
"""
return IMPL.service_get_all_volume_sorted(context)
@@ -116,6 +134,7 @@ def floating_ip_allocate_address(context, host, project_id):
"""Allocate free floating ip and return the address.
Raises if one is not available.
+
"""
return IMPL.floating_ip_allocate_address(context, host, project_id)
@@ -144,6 +163,7 @@ def floating_ip_disassociate(context, address):
"""Disassociate an floating ip from a fixed ip by address.
Returns the address of the existing fixed ip.
+
"""
return IMPL.floating_ip_disassociate(context, address)
@@ -182,6 +202,7 @@ def fixed_ip_associate(context, address, instance_id):
"""Associate fixed ip to instance.
Raises if fixed ip is not available.
+
"""
return IMPL.fixed_ip_associate(context, address, instance_id)
@@ -190,6 +211,7 @@ def fixed_ip_associate_pool(context, network_id, instance_id):
"""Find free ip in network and associate it to instance.
Raises if one is not available.
+
"""
return IMPL.fixed_ip_associate_pool(context, network_id, instance_id)
@@ -205,7 +227,7 @@ def fixed_ip_disassociate(context, address):
def fixed_ip_disassociate_all_by_timeout(context, host, time):
- """Disassociate old fixed ips from host"""
+ """Disassociate old fixed ips from host."""
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
@@ -283,7 +305,7 @@ def instance_get_floating_address(context, instance_id):
def instance_get_by_internal_id(context, internal_id):
- """Get an instance by ec2 id."""
+ """Get an instance by internal id."""
return IMPL.instance_get_by_internal_id(context, internal_id)
@@ -307,7 +329,7 @@ def instance_update(context, instance_id, values):
def instance_add_security_group(context, instance_id, security_group_id):
- """Associate the given security group with the given instance"""
+ """Associate the given security group with the given instance."""
return IMPL.instance_add_security_group(context, instance_id,
security_group_id)
@@ -369,10 +391,12 @@ def network_count_reserved_ips(context, network_id):
def network_create_safe(context, values):
- """Create a network from the values dict
+ """Create a network from the values dict.
The network is only returned if the create succeeds. If the create violates
- constraints because the network already exists, no exception is raised."""
+ constraints because the network already exists, no exception is raised.
+
+ """
return IMPL.network_create_safe(context, values)
@@ -413,22 +437,22 @@ def network_get_by_instance(context, instance_id):
def network_get_index(context, network_id):
- """Get non-conflicting index for network"""
+ """Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id)
def network_get_vpn_ip(context, network_id):
- """Get non-conflicting index for network"""
+ """Get non-conflicting index for network."""
return IMPL.network_get_vpn_ip(context, network_id)
def network_set_cidr(context, network_id, cidr):
- """Set the Classless Inner Domain Routing for the network"""
+    """Set the Classless Inter-Domain Routing for the network."""
return IMPL.network_set_cidr(context, network_id, cidr)
def network_set_host(context, network_id, host_id):
- """Safely set the host for network"""
+ """Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
@@ -474,25 +498,44 @@ def export_device_create_safe(context, values):
The device is not returned. If the create violates the unique
constraints because the shelf_id and blade_id already exist,
- no exception is raised."""
+ no exception is raised.
+
+ """
return IMPL.export_device_create_safe(context, values)
###################
+def iscsi_target_count_by_host(context, host):
+ """Return count of export devices."""
+ return IMPL.iscsi_target_count_by_host(context, host)
+
+
+def iscsi_target_create_safe(context, values):
+ """Create an iscsi_target from the values dictionary.
+
+ The device is not returned. If the create violates the unique
+ constraints because the iscsi_target and host already exist,
+ no exception is raised."""
+ return IMPL.iscsi_target_create_safe(context, values)
+
+
+###############
+
+
def auth_destroy_token(context, token):
- """Destroy an auth token"""
+ """Destroy an auth token."""
return IMPL.auth_destroy_token(context, token)
def auth_get_token(context, token_hash):
- """Retrieves a token given the hash representing it"""
+ """Retrieves a token given the hash representing it."""
return IMPL.auth_get_token(context, token_hash)
def auth_create_token(context, token):
- """Creates a new token"""
+ """Creates a new token."""
return IMPL.auth_create_token(context, token)
@@ -527,6 +570,11 @@ def volume_allocate_shelf_and_blade(context, volume_id):
return IMPL.volume_allocate_shelf_and_blade(context, volume_id)
+def volume_allocate_iscsi_target(context, volume_id, host):
+ """Atomically allocate a free iscsi_target from the pool."""
+ return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
+
+
def volume_attached(context, volume_id, instance_id, mountpoint):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
@@ -562,9 +610,9 @@ def volume_get_all(context):
return IMPL.volume_get_all(context)
-def volume_get_instance(context, volume_id):
- """Get the instance that a volume is attached to."""
- return IMPL.volume_get_instance(context, volume_id)
+def volume_get_all_by_host(context, host):
+ """Get all volumes belonging to a host."""
+ return IMPL.volume_get_all_by_host(context, host)
def volume_get_all_by_project(context, project_id):
@@ -577,11 +625,21 @@ def volume_get_by_ec2_id(context, ec2_id):
return IMPL.volume_get_by_ec2_id(context, ec2_id)
+def volume_get_instance(context, volume_id):
+ """Get the instance that a volume is attached to."""
+ return IMPL.volume_get_instance(context, volume_id)
+
+
def volume_get_shelf_and_blade(context, volume_id):
"""Get the shelf and blade allocated to the volume."""
return IMPL.volume_get_shelf_and_blade(context, volume_id)
+def volume_get_iscsi_target_num(context, volume_id):
+ """Get the target num (tid) allocated to the volume."""
+ return IMPL.volume_get_iscsi_target_num(context, volume_id)
+
+
def volume_update(context, volume_id, values):
"""Set the given properties on an volume and update it.
@@ -595,47 +653,47 @@ def volume_update(context, volume_id, values):
def security_group_get_all(context):
- """Get all security groups"""
+ """Get all security groups."""
return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id):
- """Get security group by its internal id"""
+ """Get security group by its internal id."""
return IMPL.security_group_get(context, security_group_id)
def security_group_get_by_name(context, project_id, group_name):
- """Returns a security group with the specified name from a project"""
+ """Returns a security group with the specified name from a project."""
return IMPL.security_group_get_by_name(context, project_id, group_name)
def security_group_get_by_project(context, project_id):
- """Get all security groups belonging to a project"""
+ """Get all security groups belonging to a project."""
return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_id):
- """Get security groups to which the instance is assigned"""
+ """Get security groups to which the instance is assigned."""
return IMPL.security_group_get_by_instance(context, instance_id)
def security_group_exists(context, project_id, group_name):
- """Indicates if a group name exists in a project"""
+ """Indicates if a group name exists in a project."""
return IMPL.security_group_exists(context, project_id, group_name)
def security_group_create(context, values):
- """Create a new security group"""
+ """Create a new security group."""
return IMPL.security_group_create(context, values)
def security_group_destroy(context, security_group_id):
- """Deletes a security group"""
+ """Deletes a security group."""
return IMPL.security_group_destroy(context, security_group_id)
def security_group_destroy_all(context):
- """Deletes a security group"""
+ """Deletes a security group."""
return IMPL.security_group_destroy_all(context)
@@ -643,18 +701,18 @@ def security_group_destroy_all(context):
def security_group_rule_create(context, values):
- """Create a new security group"""
+ """Create a new security group."""
return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id):
- """Get all rules for a a given security group"""
+    """Get all rules for a given security group."""
return IMPL.security_group_rule_get_by_security_group(context,
security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
- """Deletes a security group rule"""
+ """Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
@@ -662,107 +720,107 @@ def security_group_rule_destroy(context, security_group_rule_id):
def user_get(context, id):
- """Get user by id"""
+ """Get user by id."""
return IMPL.user_get(context, id)
def user_get_by_uid(context, uid):
- """Get user by uid"""
+ """Get user by uid."""
return IMPL.user_get_by_uid(context, uid)
def user_get_by_access_key(context, access_key):
- """Get user by access key"""
+ """Get user by access key."""
return IMPL.user_get_by_access_key(context, access_key)
def user_create(context, values):
- """Create a new user"""
+ """Create a new user."""
return IMPL.user_create(context, values)
def user_delete(context, id):
- """Delete a user"""
+ """Delete a user."""
return IMPL.user_delete(context, id)
def user_get_all(context):
- """Create a new user"""
+    """Get all users."""
return IMPL.user_get_all(context)
def user_add_role(context, user_id, role):
- """Add another global role for user"""
+ """Add another global role for user."""
return IMPL.user_add_role(context, user_id, role)
def user_remove_role(context, user_id, role):
- """Remove global role from user"""
+ """Remove global role from user."""
return IMPL.user_remove_role(context, user_id, role)
def user_get_roles(context, user_id):
- """Get global roles for user"""
+ """Get global roles for user."""
return IMPL.user_get_roles(context, user_id)
def user_add_project_role(context, user_id, project_id, role):
- """Add project role for user"""
+ """Add project role for user."""
return IMPL.user_add_project_role(context, user_id, project_id, role)
def user_remove_project_role(context, user_id, project_id, role):
- """Remove project role from user"""
+ """Remove project role from user."""
return IMPL.user_remove_project_role(context, user_id, project_id, role)
def user_get_roles_for_project(context, user_id, project_id):
- """Return list of roles a user holds on project"""
+ """Return list of roles a user holds on project."""
return IMPL.user_get_roles_for_project(context, user_id, project_id)
def user_update(context, user_id, values):
- """Update user"""
+ """Update user."""
return IMPL.user_update(context, user_id, values)
def project_get(context, id):
- """Get project by id"""
+ """Get project by id."""
return IMPL.project_get(context, id)
def project_create(context, values):
- """Create a new project"""
+ """Create a new project."""
return IMPL.project_create(context, values)
def project_add_member(context, project_id, user_id):
- """Add user to project"""
+ """Add user to project."""
return IMPL.project_add_member(context, project_id, user_id)
def project_get_all(context):
- """Get all projects"""
+ """Get all projects."""
return IMPL.project_get_all(context)
def project_get_by_user(context, user_id):
- """Get all projects of which the given user is a member"""
+ """Get all projects of which the given user is a member."""
return IMPL.project_get_by_user(context, user_id)
def project_remove_member(context, project_id, user_id):
- """Remove the given user from the given project"""
+ """Remove the given user from the given project."""
return IMPL.project_remove_member(context, project_id, user_id)
def project_update(context, project_id, values):
- """Update Remove the given user from the given project"""
+    """Update the given project."""
return IMPL.project_update(context, project_id, values)
def project_delete(context, project_id):
- """Delete project"""
+ """Delete project."""
return IMPL.project_delete(context, project_id)
@@ -771,6 +829,7 @@ def project_delete(context, project_id):
def host_get_networks(context, host):
"""Return all networks for which the given host is the designated
- network host
+ network host.
+
"""
return IMPL.host_get_networks(context, host)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 0cbe56499..afa55fc03 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -16,9 +16,10 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Implementation of SQLAlchemy backend
+Implementation of SQLAlchemy backend.
"""
+import random
import warnings
from nova import db
@@ -43,7 +44,6 @@ def is_admin_context(context):
warnings.warn('Use of empty request context is deprecated',
DeprecationWarning)
raise Exception('die')
- return True
return context.is_admin
@@ -235,8 +235,7 @@ def service_get_by_args(context, host, binary):
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
- for (key, value) in values.iteritems():
- service_ref[key] = value
+ service_ref.update(values)
service_ref.save()
return service_ref
@@ -246,8 +245,7 @@ def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = service_get(context, service_id, session=session)
- for (key, value) in values.iteritems():
- service_ref[key] = value
+ service_ref.update(values)
service_ref.save(session=session)
@@ -278,8 +276,7 @@ def floating_ip_allocate_address(context, host, project_id):
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
- for (key, value) in values.iteritems():
- floating_ip_ref[key] = value
+ floating_ip_ref.update(values)
floating_ip_ref.save()
return floating_ip_ref['address']
@@ -392,7 +389,7 @@ def floating_ip_get_by_address(context, address, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound('No fixed ip for address %s' % address)
+ raise exception.NotFound('No floating ip for address %s' % address)
return result
@@ -450,8 +447,7 @@ def fixed_ip_associate_pool(context, network_id, instance_id):
@require_context
def fixed_ip_create(_context, values):
fixed_ip_ref = models.FixedIp()
- for (key, value) in values.iteritems():
- fixed_ip_ref[key] = value
+ fixed_ip_ref.update(values)
fixed_ip_ref.save()
return fixed_ip_ref['address']
@@ -505,14 +501,14 @@ def fixed_ip_get_by_address(context, address, session=None):
@require_context
def fixed_ip_get_instance(context, address):
- fixed_ip_ref = fixed_ip_get_by_address(context, address)
- return fixed_ip_ref.instance
+ fixed_ip_ref = fixed_ip_get_by_address(context, address)
+ return fixed_ip_ref.instance
@require_admin_context
def fixed_ip_get_network(context, address):
- fixed_ip_ref = fixed_ip_get_by_address(context, address)
- return fixed_ip_ref.network
+ fixed_ip_ref = fixed_ip_get_by_address(context, address)
+ return fixed_ip_ref.network
@require_context
@@ -522,8 +518,7 @@ def fixed_ip_update(context, address, values):
fixed_ip_ref = fixed_ip_get_by_address(context,
address,
session=session)
- for (key, value) in values.iteritems():
- fixed_ip_ref[key] = value
+ fixed_ip_ref.update(values)
fixed_ip_ref.save(session=session)
@@ -536,13 +531,13 @@ def fixed_ip_update(context, address, values):
@require_context
def instance_create(context, values):
instance_ref = models.Instance()
- for (key, value) in values.iteritems():
- instance_ref[key] = value
+ instance_ref.update(values)
session = get_session()
with session.begin():
while instance_ref.internal_id == None:
- internal_id = utils.generate_uid(instance_ref.__prefix__)
+ # Instances have integer internal ids.
+ internal_id = random.randint(0, 2 ** 32 - 1)
if not instance_internal_id_exists(context, internal_id,
session=session):
instance_ref.internal_id = internal_id
@@ -729,8 +724,7 @@ def instance_update(context, instance_id, values):
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
- for (key, value) in values.iteritems():
- instance_ref[key] = value
+ instance_ref.update(values)
instance_ref.save(session=session)
@@ -752,8 +746,7 @@ def instance_add_security_group(context, instance_id, security_group_id):
@require_context
def key_pair_create(context, values):
key_pair_ref = models.KeyPair()
- for (key, value) in values.iteritems():
- key_pair_ref[key] = value
+ key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
@@ -868,8 +861,7 @@ def network_count_reserved_ips(context, network_id):
@require_admin_context
def network_create_safe(context, values):
network_ref = models.Network()
- for (key, value) in values.iteritems():
- network_ref[key] = value
+ network_ref.update(values)
try:
network_ref.save()
return network_ref
@@ -978,8 +970,7 @@ def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = network_get(context, network_id, session=session)
- for (key, value) in values.iteritems():
- network_ref[key] = value
+ network_ref.update(values)
network_ref.save(session=session)
@@ -1029,8 +1020,7 @@ def export_device_count(context):
@require_admin_context
def export_device_create_safe(context, values):
export_device_ref = models.ExportDevice()
- for (key, value) in values.iteritems():
- export_device_ref[key] = value
+ export_device_ref.update(values)
try:
export_device_ref.save()
return export_device_ref
@@ -1041,6 +1031,30 @@ def export_device_create_safe(context, values):
###################
+@require_admin_context
+def iscsi_target_count_by_host(context, host):
+ session = get_session()
+ return session.query(models.IscsiTarget).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(host=host).\
+ count()
+
+
+@require_admin_context
+def iscsi_target_create_safe(context, values):
+ iscsi_target_ref = models.IscsiTarget()
+ for (key, value) in values.iteritems():
+ iscsi_target_ref[key] = value
+ try:
+ iscsi_target_ref.save()
+ return iscsi_target_ref
+ except IntegrityError:
+ return None
+
+
+###################
+
+
def auth_destroy_token(_context, token):
session = get_session()
session.delete(token)
@@ -1058,8 +1072,7 @@ def auth_get_token(_context, token_hash):
def auth_create_token(_context, token):
tk = models.AuthToken()
- for k, v in token.iteritems():
- tk[k] = v
+ tk.update(token)
tk.save()
return tk
@@ -1085,8 +1098,7 @@ def quota_get(context, project_id, session=None):
@require_admin_context
def quota_create(context, values):
quota_ref = models.Quota()
- for (key, value) in values.iteritems():
- quota_ref[key] = value
+ quota_ref.update(values)
quota_ref.save()
return quota_ref
@@ -1096,8 +1108,7 @@ def quota_update(context, project_id, values):
session = get_session()
with session.begin():
quota_ref = quota_get(context, project_id, session=session)
- for (key, value) in values.iteritems():
- quota_ref[key] = value
+ quota_ref.update(values)
quota_ref.save(session=session)
@@ -1131,6 +1142,25 @@ def volume_allocate_shelf_and_blade(context, volume_id):
@require_admin_context
+def volume_allocate_iscsi_target(context, volume_id, host):
+ session = get_session()
+ with session.begin():
+ iscsi_target_ref = session.query(models.IscsiTarget).\
+ filter_by(volume=None).\
+ filter_by(host=host).\
+ filter_by(deleted=False).\
+ with_lockmode('update').\
+ first()
+ # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+ # then this has concurrency issues
+ if not iscsi_target_ref:
+ raise db.NoMoreTargets()
+ iscsi_target_ref.volume_id = volume_id
+ session.add(iscsi_target_ref)
+ return iscsi_target_ref.target_num
+
+
+@require_admin_context
def volume_attached(context, volume_id, instance_id, mountpoint):
session = get_session()
with session.begin():
@@ -1146,13 +1176,12 @@ def volume_attached(context, volume_id, instance_id, mountpoint):
@require_context
def volume_create(context, values):
volume_ref = models.Volume()
- for (key, value) in values.iteritems():
- volume_ref[key] = value
+ volume_ref.update(values)
session = get_session()
with session.begin():
while volume_ref.ec2_id == None:
- ec2_id = utils.generate_uid(volume_ref.__prefix__)
+ ec2_id = utils.generate_uid('vol')
if not volume_ec2_id_exists(context, ec2_id, session=session):
volume_ref.ec2_id = ec2_id
volume_ref.save(session=session)
@@ -1181,6 +1210,9 @@ def volume_destroy(context, volume_id):
session.execute('update export_devices set volume_id=NULL '
'where volume_id=:id',
{'id': volume_id})
+ session.execute('update iscsi_targets set volume_id=NULL '
+ 'where volume_id=:id',
+ {'id': volume_id})
@require_admin_context
@@ -1222,6 +1254,17 @@ def volume_get(context, volume_id, session=None):
def volume_get_all(context):
session = get_session()
return session.query(models.Volume).\
+ options(joinedload('instance')).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_admin_context
+def volume_get_all_by_host(context, host):
+ session = get_session()
+ return session.query(models.Volume).\
+ options(joinedload('instance')).\
+ filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -1232,6 +1275,7 @@ def volume_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Volume).\
+ options(joinedload('instance')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -1299,13 +1343,25 @@ def volume_get_shelf_and_blade(context, volume_id):
return (result.shelf_id, result.blade_id)
+@require_admin_context
+def volume_get_iscsi_target_num(context, volume_id):
+ session = get_session()
+ result = session.query(models.IscsiTarget).\
+ filter_by(volume_id=volume_id).\
+ first()
+ if not result:
+ raise exception.NotFound('No target id found for volume %s' %
+ volume_id)
+
+ return result.target_num
+
+
@require_context
def volume_update(context, volume_id, values):
session = get_session()
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
- for (key, value) in values.iteritems():
- volume_ref[key] = value
+ volume_ref.update(values)
volume_ref.save(session=session)
@@ -1398,8 +1454,7 @@ def security_group_create(context, values):
# FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
# once save() is called. This will get cleaned up in next orm pass.
security_group_ref.rules
- for (key, value) in values.iteritems():
- security_group_ref[key] = value
+ security_group_ref.update(values)
security_group_ref.save()
return security_group_ref
@@ -1453,8 +1508,7 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
@require_context
def security_group_rule_create(context, values):
security_group_rule_ref = models.SecurityGroupIngressRule()
- for (key, value) in values.iteritems():
- security_group_rule_ref[key] = value
+ security_group_rule_ref.update(values)
security_group_rule_ref.save()
return security_group_rule_ref
@@ -1506,8 +1560,7 @@ def user_get_by_access_key(context, access_key, session=None):
@require_admin_context
def user_create(_context, values):
user_ref = models.User()
- for (key, value) in values.iteritems():
- user_ref[key] = value
+ user_ref.update(values)
user_ref.save()
return user_ref
@@ -1535,8 +1588,7 @@ def user_get_all(context):
def project_create(_context, values):
project_ref = models.Project()
- for (key, value) in values.iteritems():
- project_ref[key] = value
+ project_ref.update(values)
project_ref.save()
return project_ref
@@ -1598,8 +1650,7 @@ def user_update(context, user_id, values):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
- for (key, value) in values.iteritems():
- user_ref[key] = value
+ user_ref.update(values)
user_ref.save(session=session)
@@ -1607,8 +1658,7 @@ def project_update(context, project_id, values):
session = get_session()
with session.begin():
project_ref = project_get(context, project_id, session=session)
- for (key, value) in values.iteritems():
- project_ref[key] = value
+ project_ref.update(values)
project_ref.save(session=session)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 38c96bdec..01b5cf350 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -15,16 +15,13 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
-SQLAlchemy models for nova data
+SQLAlchemy models for nova data.
"""
-import sys
import datetime
-# TODO(vish): clean up these imports
-from sqlalchemy.orm import relationship, backref, exc, object_mapper
+from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
from sqlalchemy.exc import IntegrityError
@@ -37,28 +34,22 @@ from nova import auth
from nova import exception
from nova import flags
-FLAGS = flags.FLAGS
+FLAGS = flags.FLAGS
BASE = declarative_base()
class NovaBase(object):
- """Base class for Nova Models"""
+ """Base class for Nova Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
- __prefix__ = 'none'
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow)
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
- @property
- def str_id(self):
- """Get string id of object (generally prefix + '-' + id)"""
- return "%s-%s" % (self.__prefix__, self.id)
-
def save(self, session=None):
- """Save this object"""
+ """Save this object."""
if not session:
session = get_session()
session.add(self)
@@ -71,7 +62,7 @@ class NovaBase(object):
raise
def delete(self, session=None):
- """Delete this object"""
+ """Delete this object."""
self.deleted = True
self.deleted_at = datetime.datetime.utcnow()
self.save(session=session)
@@ -82,6 +73,9 @@ class NovaBase(object):
def __getitem__(self, key):
return getattr(self, key)
+ def get(self, key, default=None):
+ return getattr(self, key, default)
+
def __iter__(self):
self._i = iter(object_mapper(self).columns)
return self
@@ -90,11 +84,20 @@ class NovaBase(object):
n = self._i.next().name
return n, getattr(self, n)
+ def update(self, values):
+ """Make the model object behave like a dict"""
+ for k, v in values.iteritems():
+ setattr(self, k, v)
+
+ def iteritems(self):
+ """Make the model object behave like a dict"""
+ return iter(self)
+
+
# TODO(vish): Store images in the database instead of file system
#class Image(BASE, NovaBase):
# """Represents an image in the datastore"""
# __tablename__ = 'images'
-# __prefix__ = 'ami'
# id = Column(Integer, primary_key=True)
# ec2_id = Column(String(12), unique=True)
# user_id = Column(String(255))
@@ -137,7 +140,8 @@ class NovaBase(object):
class Service(BASE, NovaBase):
- """Represents a running service on a host"""
+ """Represents a running service on a host."""
+
__tablename__ = 'services'
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
@@ -148,9 +152,8 @@ class Service(BASE, NovaBase):
class Instance(BASE, NovaBase):
- """Represents a guest vm"""
+ """Represents a guest vm."""
__tablename__ = 'instances'
- __prefix__ = 'i'
id = Column(Integer, primary_key=True)
internal_id = Column(Integer, unique=True)
@@ -225,9 +228,8 @@ class Instance(BASE, NovaBase):
class Volume(BASE, NovaBase):
- """Represents a block storage device that can be attached to a vm"""
+ """Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
- __prefix__ = 'vol'
id = Column(Integer, primary_key=True)
ec2_id = Column(String(12), unique=True)
@@ -255,9 +257,13 @@ class Volume(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+ @property
+ def name(self):
+ return self.ec2_id
+
class Quota(BASE, NovaBase):
- """Represents quota overrides for a project"""
+ """Represents quota overrides for a project."""
__tablename__ = 'quotas'
id = Column(Integer, primary_key=True)
@@ -269,13 +275,9 @@ class Quota(BASE, NovaBase):
gigabytes = Column(Integer)
floating_ips = Column(Integer)
- @property
- def str_id(self):
- return self.project_id
-
class ExportDevice(BASE, NovaBase):
- """Represates a shelf and blade that a volume can be exported on"""
+    """Represents a shelf and blade that a volume can be exported on."""
__tablename__ = 'export_devices'
__table_args__ = (schema.UniqueConstraint("shelf_id", "blade_id"),
{'mysql_engine': 'InnoDB'})
@@ -290,6 +292,22 @@ class ExportDevice(BASE, NovaBase):
'ExportDevice.deleted==False)')
+class IscsiTarget(BASE, NovaBase):
+    """Represents an iscsi target for a given host"""
+ __tablename__ = 'iscsi_targets'
+ __table_args__ = (schema.UniqueConstraint("target_num", "host"),
+ {'mysql_engine': 'InnoDB'})
+ id = Column(Integer, primary_key=True)
+ target_num = Column(Integer)
+ host = Column(String(255))
+ volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True)
+ volume = relationship(Volume,
+ backref=backref('iscsi_target', uselist=False),
+ foreign_keys=volume_id,
+ primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
+ 'IscsiTarget.deleted==False)')
+
+
class SecurityGroupInstanceAssociation(BASE, NovaBase):
__tablename__ = 'security_group_instance_association'
id = Column(Integer, primary_key=True)
@@ -298,7 +316,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase):
class SecurityGroup(BASE, NovaBase):
- """Represents a security group"""
+ """Represents a security group."""
__tablename__ = 'security_groups'
id = Column(Integer, primary_key=True)
@@ -328,7 +346,7 @@ class SecurityGroup(BASE, NovaBase):
class SecurityGroupIngressRule(BASE, NovaBase):
- """Represents a rule in a security group"""
+ """Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
id = Column(Integer, primary_key=True)
@@ -350,7 +368,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
class KeyPair(BASE, NovaBase):
- """Represents a public key pair for ssh"""
+ """Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
id = Column(Integer, primary_key=True)
@@ -361,13 +379,9 @@ class KeyPair(BASE, NovaBase):
fingerprint = Column(String(255))
public_key = Column(Text)
- @property
- def str_id(self):
- return '%s.%s' % (self.user_id, self.name)
-
class Network(BASE, NovaBase):
- """Represents a network"""
+ """Represents a network."""
__tablename__ = 'networks'
__table_args__ = (schema.UniqueConstraint("vpn_public_address",
"vpn_public_port"),
@@ -396,9 +410,12 @@ class Network(BASE, NovaBase):
class AuthToken(BASE, NovaBase):
- """Represents an authorization token for all API transactions. Fields
- are a string representing the actual token and a user id for mapping
- to the actual user"""
+ """Represents an authorization token for all API transactions.
+
+ Fields are a string representing the actual token and a user id for
+ mapping to the actual user
+
+ """
__tablename__ = 'auth_tokens'
token_hash = Column(String(255), primary_key=True)
user_id = Column(Integer)
@@ -409,7 +426,7 @@ class AuthToken(BASE, NovaBase):
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
- """Represents a fixed ip for an instance"""
+ """Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
@@ -426,13 +443,9 @@ class FixedIp(BASE, NovaBase):
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
- @property
- def str_id(self):
- return self.address
-
class User(BASE, NovaBase):
- """Represents a user"""
+ """Represents a user."""
__tablename__ = 'users'
id = Column(String(255), primary_key=True)
@@ -444,7 +457,7 @@ class User(BASE, NovaBase):
class Project(BASE, NovaBase):
- """Represents a project"""
+ """Represents a project."""
__tablename__ = 'projects'
id = Column(String(255), primary_key=True)
name = Column(String(255))
@@ -492,7 +505,7 @@ class UserProjectAssociation(BASE, NovaBase):
class FloatingIp(BASE, NovaBase):
- """Represents a floating ip that dynamically forwards to a fixed ip"""
+ """Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
@@ -508,9 +521,13 @@ class FloatingIp(BASE, NovaBase):
def register_models():
- """Register Models and create metadata"""
+ """Register Models and create metadata.
+
+ Called from nova.db.sqlalchemy.__init__ as part of loading the driver,
+ it will never need to be called explicitly elsewhere.
+ """
from sqlalchemy import create_engine
- models = (Service, Instance, Volume, ExportDevice, FixedIp,
+ models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp,
FloatingIp, Network, SecurityGroup,
SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
AuthToken, User, Project) # , Image, Host
diff --git a/nova/flags.py b/nova/flags.py
index f3b0384ad..f7ae26050 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -138,6 +138,8 @@ class FlagValues(gflags.FlagValues):
FLAGS = FlagValues()
+gflags.FLAGS = FLAGS
+gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
def _wrapper(func):
@@ -177,6 +179,8 @@ DEFINE_list('region_list',
[],
'list of region=url pairs separated by commas')
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
+DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
+DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
@@ -194,6 +198,8 @@ DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
+DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
+DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
@@ -224,13 +230,13 @@ DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
'Manager for compute')
DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
'Manager for network')
-DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
+DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.service.LocalImageService',
+DEFINE_string('image_service', 'nova.image.local.LocalImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),
diff --git a/nova/image/services/glance/__init__.py b/nova/image/glance.py
index f1d05f0bc..1ca6cf2eb 100644
--- a/nova/image/services/glance/__init__.py
+++ b/nova/image/glance.py
@@ -19,6 +19,7 @@
import httplib
import json
+import logging
import urlparse
import webob.exc
@@ -31,6 +32,17 @@ import nova.image.service
FLAGS = flags.FLAGS
+flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
+ 'IP address or URL where Glance\'s Teller service resides')
+flags.DEFINE_string('glance_teller_port', '9191',
+ 'Port for Glance\'s Teller service')
+flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1',
+ 'IP address or URL where Glance\'s Parallax service '
+ 'resides')
+flags.DEFINE_string('glance_parallax_port', '9292',
+ 'Port for Glance\'s Parallax service')
+
+
class TellerClient(object):
def __init__(self):
@@ -160,21 +172,21 @@ class GlanceImageService(nova.image.service.BaseImageService):
self.teller = TellerClient()
self.parallax = ParallaxClient()
- def index(self):
+ def index(self, context):
"""
Calls out to Parallax for a list of images available
"""
images = self.parallax.get_image_index()
return images
- def detail(self):
+ def detail(self, context):
"""
Calls out to Parallax for a list of detailed image information
"""
images = self.parallax.get_image_details()
return images
- def show(self, id):
+ def show(self, context, id):
"""
Returns a dict containing image data for the given opaque image id.
"""
@@ -183,7 +195,7 @@ class GlanceImageService(nova.image.service.BaseImageService):
return image
raise exception.NotFound
- def create(self, data):
+ def create(self, context, data):
"""
Store the image data and return the new image id.
@@ -192,7 +204,7 @@ class GlanceImageService(nova.image.service.BaseImageService):
"""
return self.parallax.add_image_metadata(data)
- def update(self, image_id, data):
+ def update(self, context, image_id, data):
"""Replace the contents of the given image with the new data.
:raises NotFound if the image does not exist.
@@ -200,7 +212,7 @@ class GlanceImageService(nova.image.service.BaseImageService):
"""
self.parallax.update_image_metadata(image_id, data)
- def delete(self, image_id):
+ def delete(self, context, image_id):
"""
Delete the given image.
diff --git a/nova/image/local.py b/nova/image/local.py
new file mode 100644
index 000000000..9b0cdcc50
--- /dev/null
+++ b/nova/image/local.py
@@ -0,0 +1,88 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import cPickle as pickle
+import os.path
+import random
+
+from nova import exception
+from nova.image import service
+
+
+class LocalImageService(service.BaseImageService):
+
+ """Image service storing images to local disk.
+
+ It assumes that image_ids are integers."""
+
+ def __init__(self):
+ self._path = "/tmp/nova/images"
+ try:
+ os.makedirs(self._path)
+ except OSError: # Exists
+ pass
+
+ def _path_to(self, image_id):
+ return os.path.join(self._path, str(image_id))
+
+ def _ids(self):
+ """The list of all image ids."""
+ return [int(i) for i in os.listdir(self._path)]
+
+ def index(self, context):
+ return [dict(id=i['id'], name=i['name']) for i in self.detail(context)]
+
+ def detail(self, context):
+ return [self.show(context, id) for id in self._ids()]
+
+ def show(self, context, id):
+ try:
+ return pickle.load(open(self._path_to(id)))
+ except IOError:
+ raise exception.NotFound
+
+ def create(self, context, data):
+ """
+ Store the image data and return the new image id.
+ """
+ id = random.randint(0, 2 ** 32 - 1)
+ data['id'] = id
+ self.update(context, id, data)
+ return id
+
+ def update(self, context, image_id, data):
+ """Replace the contents of the given image with the new data."""
+ try:
+ pickle.dump(data, open(self._path_to(image_id), 'w'))
+ except IOError:
+ raise exception.NotFound
+
+ def delete(self, context, image_id):
+ """
+ Delete the given image. Raises OSError if the image does not exist.
+ """
+ try:
+ os.unlink(self._path_to(image_id))
+ except IOError:
+ raise exception.NotFound
+
+ def delete_all(self):
+ """
+ Clears out all images in local directory
+ """
+ for id in self._ids():
+ os.unlink(self._path_to(id))
diff --git a/nova/image/s3.py b/nova/image/s3.py
new file mode 100644
index 000000000..0a25161de
--- /dev/null
+++ b/nova/image/s3.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Proxy AMI-related calls from the cloud controller, to the running
+objectstore service.
+"""
+
+import json
+import urllib
+
+import boto.s3.connection
+
+from nova import exception
+from nova import flags
+from nova import utils
+from nova.auth import manager
+from nova.image import service
+
+
+FLAGS = flags.FLAGS
+
+
+class S3ImageService(service.BaseImageService):
+
+ def modify(self, context, image_id, operation):
+ self._conn(context).make_request(
+ method='POST',
+ bucket='_images',
+ query_args=self._qs({'image_id': image_id,
+ 'operation': operation}))
+ return True
+
+ def update(self, context, image_id, attributes):
+ """update an image's attributes / info.json"""
+ attributes.update({"image_id": image_id})
+ self._conn(context).make_request(
+ method='POST',
+ bucket='_images',
+ query_args=self._qs(attributes))
+ return True
+
+ def register(self, context, image_location):
+        """ rpc call to register a new image based on a manifest """
+ image_id = utils.generate_uid('ami')
+ self._conn(context).make_request(
+ method='PUT',
+ bucket='_images',
+ query_args=self._qs({'image_location': image_location,
+ 'image_id': image_id}))
+ return image_id
+
+ def index(self, context):
+ """Return a list of all images that a user can see."""
+ response = self._conn(context).make_request(
+ method='GET',
+ bucket='_images')
+ return json.loads(response.read())
+
+ def show(self, context, image_id):
+        """return an image object if the context has permissions"""
+ if FLAGS.connection_type == 'fake':
+ return {'imageId': 'bar'}
+ result = self.index(context)
+ result = [i for i in result if i['imageId'] == image_id]
+ if not result:
+ raise exception.NotFound('Image %s could not be found' % image_id)
+ image = result[0]
+ return image
+
+ def deregister(self, context, image_id):
+ """ unregister an image """
+ self._conn(context).make_request(
+ method='DELETE',
+ bucket='_images',
+ query_args=self._qs({'image_id': image_id}))
+
+ def _conn(self, context):
+ access = manager.AuthManager().get_access_key(context.user,
+ context.project)
+ secret = str(context.user.secret)
+ calling = boto.s3.connection.OrdinaryCallingFormat()
+ return boto.s3.connection.S3Connection(aws_access_key_id=access,
+ aws_secret_access_key=secret,
+ is_secure=False,
+ calling_format=calling,
+ port=FLAGS.s3_port,
+ host=FLAGS.s3_host)
+
+ def _qs(self, params):
+ pairs = []
+ for key in params.keys():
+ pairs.append(key + '=' + urllib.quote(params[key]))
+ return '&'.join(pairs)
diff --git a/nova/image/service.py b/nova/image/service.py
index 37cadddcc..ebee2228d 100644
--- a/nova/image/service.py
+++ b/nova/image/service.py
@@ -15,64 +15,38 @@
# License for the specific language governing permissions and limitations
# under the License.
-import cPickle as pickle
-import os.path
-import random
-
-from nova import flags
-from nova import exception
-
-FLAGS = flags.FLAGS
-
-
-flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
- 'IP address or URL where Glance\'s Teller service resides')
-flags.DEFINE_string('glance_teller_port', '9191',
- 'Port for Glance\'s Teller service')
-flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1',
- 'IP address or URL where Glance\'s Parallax service '
- 'resides')
-flags.DEFINE_string('glance_parallax_port', '9292',
- 'Port for Glance\'s Parallax service')
-
class BaseImageService(object):
"""Base class for providing image search and retrieval services"""
- def index(self):
+ def index(self, context):
"""
Returns a sequence of mappings of id and name information about
images.
- :retval a sequence of mappings with the following signature:
-
- [
- {'id': opaque id of image,
- 'name': name of image
- }, ...
- ]
+ :rtype: array
+ :retval: a sequence of mappings with the following signature
+ {'id': opaque id of image, 'name': name of image}
"""
raise NotImplementedError
- def detail(self):
+ def detail(self, context):
"""
Returns a sequence of mappings of detailed information about images.
- :retval a sequence of mappings with the following signature:
-
- [
- {'id': opaque id of image,
- 'name': name of image,
- 'created_at': creation timestamp,
- 'updated_at': modification timestamp,
- 'deleted_at': deletion timestamp or None,
- 'deleted': boolean indicating if image has been deleted,
- 'status': string description of image status,
- 'is_public': boolean indicating if image is public
- }, ...
- ]
+ :rtype: array
+ :retval: a sequence of mappings with the following signature
+ {'id': opaque id of image,
+ 'name': name of image,
+ 'created_at': creation timestamp,
+ 'updated_at': modification timestamp,
+ 'deleted_at': deletion timestamp or None,
+ 'deleted': boolean indicating if image has been deleted,
+ 'status': string description of image status,
+ 'is_public': boolean indicating if image is public
+ }
If the service does not implement a method that provides a detailed
set of information about images, then the method should raise
@@ -82,7 +56,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def show(self, id):
+ def show(self, context, id):
"""
Returns a dict containing image data for the given opaque image id.
@@ -102,7 +76,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def create(self, data):
+ def create(self, context, data):
"""
Store the image data and return the new image id.
@@ -111,7 +85,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def update(self, image_id, data):
+ def update(self, context, image_id, data):
"""Replace the contents of the given image with the new data.
:raises NotFound if the image does not exist.
@@ -119,7 +93,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def delete(self, image_id):
+ def delete(self, context, image_id):
"""
Delete the given image.
@@ -127,68 +101,3 @@ class BaseImageService(object):
"""
raise NotImplementedError
-
-
-class LocalImageService(BaseImageService):
-
- """Image service storing images to local disk.
-
- It assumes that image_ids are integers."""
-
- def __init__(self):
- self._path = "/tmp/nova/images"
- try:
- os.makedirs(self._path)
- except OSError: # Exists
- pass
-
- def _path_to(self, image_id):
- return os.path.join(self._path, str(image_id))
-
- def _ids(self):
- """The list of all image ids."""
- return [int(i) for i in os.listdir(self._path)]
-
- def index(self):
- return [dict(id=i['id'], name=i['name']) for i in self.detail()]
-
- def detail(self):
- return [self.show(id) for id in self._ids()]
-
- def show(self, id):
- try:
- return pickle.load(open(self._path_to(id)))
- except IOError:
- raise exception.NotFound
-
- def create(self, data):
- """
- Store the image data and return the new image id.
- """
- id = random.randint(0, 2 ** 32 - 1)
- data['id'] = id
- self.update(id, data)
- return id
-
- def update(self, image_id, data):
- """Replace the contents of the given image with the new data."""
- try:
- pickle.dump(data, open(self._path_to(image_id), 'w'))
- except IOError:
- raise exception.NotFound
-
- def delete(self, image_id):
- """
- Delete the given image. Raises OSError if the image does not exist.
- """
- try:
- os.unlink(self._path_to(image_id))
- except IOError:
- raise exception.NotFound
-
- def delete_all(self):
- """
- Clears out all images in local directory
- """
- for id in self._ids():
- os.unlink(self._path_to(id))
diff --git a/nova/image/services/__init__.py b/nova/image/services/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/nova/image/services/__init__.py
+++ /dev/null
diff --git a/nova/manager.py b/nova/manager.py
index 4244b2db4..a6efb8732 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -15,8 +15,40 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
"""
-Base class for managers of different parts of the system
+Managers are responsible for a certain aspect of the system. It is a logical
+grouping of code relating to a portion of the system. In general other
+components should be using the manager to make changes to the components that
+it is responsible for.
+
+For example, other components that need to deal with volumes in some way,
+should do so by calling methods on the VolumeManager instead of directly
+changing fields in the database. This allows us to keep all of the code
+relating to volumes in the same place.
+
+We have adopted a basic strategy of Smart managers and dumb data, which means
+rather than attaching methods to data objects, components should call manager
+methods that act on the data.
+
+Methods on managers that can be executed locally should be called directly. If
+a particular method must execute on a remote host, this should be done via rpc
+to the service that wraps the manager
+
+Managers should be responsible for most of the db access, and
+non-implementation specific data. Anything implementation specific that can't
+be generalized should be done by the Driver.
+
+In general, we prefer to have one manager with multiple drivers for different
+implementations, but sometimes it makes sense to have multiple managers. You
+can think of it this way: Abstract different overall strategies at the manager
+level(FlatNetwork vs VlanNetwork), and different implementations at the driver
+level(LinuxNetDriver vs CiscoNetDriver).
+
+Managers will often provide methods for initial setup of a host or periodic
+tasks to a wrapping service.
+
+This module provides Manager, a base class for managers.
"""
from nova import utils
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 7b323efa1..4ea24cda6 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -244,7 +244,7 @@ def _confirm_rule(chain, cmd):
def _remove_rule(chain, cmd):
"""Remove iptables rule"""
if FLAGS.use_nova_chains:
- chain = "%S" % chain.lower()
+ chain = "%s" % chain.lower()
_execute("sudo iptables --delete %s %s" % (chain, cmd))
diff --git a/nova/network/manager.py b/nova/network/manager.py
index fddb77663..b033bb0a4 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -17,7 +17,30 @@
# under the License.
"""
-Network Hosts are responsible for allocating ips and setting up network
+Network Hosts are responsible for allocating ips and setting up network.
+
+There are multiple backend drivers that handle specific types of networking
+topologies. All of the network commands are issued to a subclass of
+:class:`NetworkManager`.
+
+**Related Flags**
+
+:network_driver: Driver to use for network creation
+:flat_network_bridge: Bridge device for simple network instances
+:flat_network_dns: Dns for simple network
+:flat_network_dhcp_start: Dhcp start for FlatDhcp
+:vlan_start: First VLAN for private networks
+:vpn_ip: Public IP for the cloudpipe VPN servers
+:vpn_start: First Vpn port for private networks
+:cnt_vpn_clients: Number of addresses reserved for vpn clients
+:network_size: Number of addresses in each private subnet
+:floating_range: Floating IP address block
+:fixed_range: Fixed IP address block
+:date_dhcp_on_disassociate: Whether to update dhcp when fixed_ip
+ is disassociated
+:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
+ is disassociated
+
"""
import datetime
@@ -63,15 +86,16 @@ flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
class AddressAlreadyAllocated(exception.Error):
- """Address was already allocated"""
+ """Address was already allocated."""
pass
class NetworkManager(manager.Manager):
- """Implements common network manager functionality
+ """Implements common network manager functionality.
- This class must be subclassed.
+ This class must be subclassed to support specific topologies.
"""
+
def __init__(self, network_driver=None, *args, **kwargs):
if not network_driver:
network_driver = FLAGS.network_driver
@@ -86,7 +110,7 @@ class NetworkManager(manager.Manager):
self._on_set_network_host(ctxt, network['id'])
def set_network_host(self, context, network_id):
- """Safely sets the host of the network"""
+ """Safely sets the host of the network."""
logging.debug("setting network host")
host = self.db.network_set_host(context,
network_id,
@@ -95,34 +119,34 @@ class NetworkManager(manager.Manager):
return host
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool"""
+ """Gets a fixed ip from the pool."""
raise NotImplementedError()
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool"""
+ """Returns a fixed ip to the pool."""
raise NotImplementedError()
def setup_fixed_ip(self, context, address):
- """Sets up rules for fixed ip"""
+ """Sets up rules for fixed ip."""
raise NotImplementedError()
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a network"""
+ """Called when this host becomes the host for a network."""
raise NotImplementedError()
def setup_compute_network(self, context, instance_id):
- """Sets up matching network for compute hosts"""
+ """Sets up matching network for compute hosts."""
raise NotImplementedError()
def allocate_floating_ip(self, context, project_id):
- """Gets an floating ip from the pool"""
+ """Gets an floating ip from the pool."""
# TODO(vish): add floating ips through manage command
return self.db.floating_ip_allocate_address(context,
self.host,
project_id)
def associate_floating_ip(self, context, floating_address, fixed_address):
- """Associates an floating ip to a fixed ip"""
+ """Associates an floating ip to a fixed ip."""
self.db.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address)
@@ -130,18 +154,18 @@ class NetworkManager(manager.Manager):
self.driver.ensure_floating_forward(floating_address, fixed_address)
def disassociate_floating_ip(self, context, floating_address):
- """Disassociates a floating ip"""
+ """Disassociates a floating ip."""
fixed_address = self.db.floating_ip_disassociate(context,
floating_address)
self.driver.unbind_floating_ip(floating_address)
self.driver.remove_floating_forward(floating_address, fixed_address)
def deallocate_floating_ip(self, context, floating_address):
- """Returns an floating ip to the pool"""
+ """Returns an floating ip to the pool."""
self.db.floating_ip_deallocate(context, floating_address)
def lease_fixed_ip(self, context, mac, address):
- """Called by dhcp-bridge when ip is leased"""
+ """Called by dhcp-bridge when ip is leased."""
logging.debug("Leasing IP %s", address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
@@ -158,7 +182,7 @@ class NetworkManager(manager.Manager):
logging.warn("IP %s leased that was already deallocated", address)
def release_fixed_ip(self, context, mac, address):
- """Called by dhcp-bridge when ip is released"""
+ """Called by dhcp-bridge when ip is released."""
logging.debug("Releasing IP %s", address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
@@ -171,7 +195,7 @@ class NetworkManager(manager.Manager):
if not fixed_ip_ref['leased']:
logging.warn("IP %s released that was not leased", address)
self.db.fixed_ip_update(context,
- fixed_ip_ref['str_id'],
+ fixed_ip_ref['address'],
{'leased': False})
if not fixed_ip_ref['allocated']:
self.db.fixed_ip_disassociate(context, address)
@@ -183,26 +207,26 @@ class NetworkManager(manager.Manager):
self.driver.update_dhcp(context, network_ref['id'])
def get_network(self, context):
- """Get the network for the current context"""
+ """Get the network for the current context."""
raise NotImplementedError()
def create_networks(self, context, num_networks, network_size,
*args, **kwargs):
- """Create networks based on parameters"""
+ """Create networks based on parameters."""
raise NotImplementedError()
@property
def _bottom_reserved_ips(self): # pylint: disable-msg=R0201
- """Number of reserved ips at the bottom of the range"""
+ """Number of reserved ips at the bottom of the range."""
return 2 # network, gateway
@property
def _top_reserved_ips(self): # pylint: disable-msg=R0201
- """Number of reserved ips at the top of the range"""
+ """Number of reserved ips at the top of the range."""
return 1 # broadcast
def _create_fixed_ips(self, context, network_id):
- """Create all fixed ips for network"""
+ """Create all fixed ips for network."""
network_ref = self.db.network_get(context, network_id)
# NOTE(vish): Should these be properties of the network as opposed
# to properties of the manager class?
@@ -222,10 +246,10 @@ class NetworkManager(manager.Manager):
class FlatManager(NetworkManager):
- """Basic network where no vlans are used"""
+ """Basic network where no vlans are used."""
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool"""
+ """Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
@@ -239,21 +263,21 @@ class FlatManager(NetworkManager):
return address
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool"""
+ """Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
self.db.fixed_ip_disassociate(context.elevated(), address)
def setup_compute_network(self, context, instance_id):
- """Network is created manually"""
+ """Network is created manually."""
pass
def setup_fixed_ip(self, context, address):
- """Currently no setup"""
+ """Currently no setup."""
pass
def create_networks(self, context, cidr, num_networks, network_size,
*args, **kwargs):
- """Create networks based on parameters"""
+ """Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
for index in range(num_networks):
start = index * network_size
@@ -271,7 +295,7 @@ class FlatManager(NetworkManager):
self._create_fixed_ips(context, network_ref['id'])
def get_network(self, context):
- """Get the network for the current context"""
+ """Get the network for the current context."""
# NOTE(vish): To support mutilple network hosts, This could randomly
# select from multiple networks instead of just
# returning the one. It could also potentially be done
@@ -280,7 +304,7 @@ class FlatManager(NetworkManager):
FLAGS.flat_network_bridge)
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a network"""
+ """Called when this host becomes the host for a network."""
net = {}
net['injected'] = True
net['bridge'] = FLAGS.flat_network_bridge
@@ -289,19 +313,19 @@ class FlatManager(NetworkManager):
class FlatDHCPManager(NetworkManager):
- """Flat networking with dhcp"""
+ """Flat networking with dhcp."""
def setup_fixed_ip(self, context, address):
- """Setup dhcp for this network"""
+ """Setup dhcp for this network."""
network_ref = db.fixed_ip_get_by_address(context, address)
self.driver.update_dhcp(context, network_ref['id'])
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool"""
+ """Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a project"""
+ """Called when this host becomes the host for a project."""
super(FlatDHCPManager, self)._on_set_network_host(context, network_id)
network_ref = self.db.network_get(context, network_id)
self.db.network_update(context,
@@ -313,11 +337,11 @@ class FlatDHCPManager(NetworkManager):
class VlanManager(NetworkManager):
- """Vlan network with dhcp"""
+ """Vlan network with dhcp."""
@defer.inlineCallbacks
def periodic_tasks(self, context=None):
- """Tasks to be run at a periodic interval"""
+ """Tasks to be run at a periodic interval."""
yield super(VlanManager, self).periodic_tasks(context)
now = datetime.datetime.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
@@ -330,13 +354,13 @@ class VlanManager(NetworkManager):
def init_host(self):
"""Do any initialization that needs to be run if this is a
- standalone service.
+ standalone service.
"""
super(VlanManager, self).init_host()
self.driver.init_host()
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool"""
+ """Gets a fixed ip from the pool."""
# TODO(vish): This should probably be getting project_id from
# the instance, but it is another trip to the db.
# Perhaps this method should take an instance_ref.
@@ -356,11 +380,11 @@ class VlanManager(NetworkManager):
return address
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool"""
+ """Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
def setup_fixed_ip(self, context, address):
- """Sets forwarding rules and dhcp for fixed ip"""
+ """Sets forwarding rules and dhcp for fixed ip."""
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
network_ref = self.db.fixed_ip_get_network(context, address)
if self.db.instance_is_vpn(context, fixed_ip_ref['instance_id']):
@@ -370,19 +394,19 @@ class VlanManager(NetworkManager):
self.driver.update_dhcp(context, network_ref['id'])
def setup_compute_network(self, context, instance_id):
- """Sets up matching network for compute hosts"""
+ """Sets up matching network for compute hosts."""
network_ref = db.network_get_by_instance(context, instance_id)
self.driver.ensure_vlan_bridge(network_ref['vlan'],
network_ref['bridge'])
def restart_nets(self):
- """Ensure the network for each user is enabled"""
+ """Ensure the network for each user is enabled."""
# TODO(vish): Implement this
pass
def create_networks(self, context, cidr, num_networks, network_size,
vlan_start, vpn_start):
- """Create networks based on parameters"""
+ """Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
for index in range(num_networks):
vlan = vlan_start + index
@@ -407,12 +431,12 @@ class VlanManager(NetworkManager):
self._create_fixed_ips(context, network_ref['id'])
def get_network(self, context):
- """Get the network for the current context"""
+ """Get the network for the current context."""
return self.db.project_get_network(context.elevated(),
context.project_id)
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a network"""
+ """Called when this host becomes the host for a network."""
network_ref = self.db.network_get(context, network_id)
net = {}
net['vpn_public_address'] = FLAGS.vpn_ip
@@ -424,11 +448,11 @@ class VlanManager(NetworkManager):
@property
def _bottom_reserved_ips(self):
- """Number of reserved ips at the bottom of the range"""
+ """Number of reserved ips at the bottom of the range."""
return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server
@property
def _top_reserved_ips(self):
- """Number of reserved ips at the top of the range"""
+ """Number of reserved ips at the top of the range."""
parent_reserved = super(VlanManager, self)._top_reserved_ips
return parent_reserved + FLAGS.cnt_vpn_clients
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index 0ba4934d1..697982538 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -78,8 +78,8 @@ class Bucket(object):
path = os.path.abspath(os.path.join(
FLAGS.buckets_path, bucket_name))
if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- os.path.exists(path):
- raise exception.NotAuthorized()
+ os.path.exists(path):
+ raise exception.NotAuthorized()
os.makedirs(path)
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index b26906001..c8920b00c 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -61,6 +61,7 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
+flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
def render_xml(request, value):
@@ -438,6 +439,7 @@ def get_application():
# Disabled because of lack of proper introspection in Twisted
# or possibly different versions of twisted?
# pylint: disable-msg=E1101
- objectStoreService = internet.TCPServer(FLAGS.s3_port, factory)
+ objectStoreService = internet.TCPServer(FLAGS.s3_port, factory,
+ interface=FLAGS.s3_listen_host)
objectStoreService.setServiceParent(application)
return application
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index b7b2ec6ab..4554444fa 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -48,8 +48,8 @@ class Image(object):
self.image_id = image_id
self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id))
if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound
+ not os.path.isdir(self.path):
+ raise exception.NotFound
@property
def image_path(self):
diff --git a/nova/rpc.py b/nova/rpc.py
index 895820cd0..86a29574f 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -24,6 +24,7 @@ No fan-out support yet.
import json
import logging
import sys
+import time
import uuid
from carrot import connection as carrot_connection
@@ -37,8 +38,8 @@ from nova import fakerabbit
from nova import flags
from nova import context
-FLAGS = flags.FLAGS
+FLAGS = flags.FLAGS
LOG = logging.getLogger('amqplib')
LOG.setLevel(logging.DEBUG)
@@ -82,8 +83,24 @@ class Consumer(messaging.Consumer):
Contains methods for connecting the fetch method to async loops
"""
def __init__(self, *args, **kwargs):
- self.failed_connection = False
- super(Consumer, self).__init__(*args, **kwargs)
+ for i in xrange(FLAGS.rabbit_max_retries):
+ if i > 0:
+ time.sleep(FLAGS.rabbit_retry_interval)
+ try:
+ super(Consumer, self).__init__(*args, **kwargs)
+ self.failed_connection = False
+ break
+ except: # Catching all because carrot sucks
+ logging.exception("AMQP server on %s:%d is unreachable." \
+ " Trying again in %d seconds." % (
+ FLAGS.rabbit_host,
+ FLAGS.rabbit_port,
+ FLAGS.rabbit_retry_interval))
+ self.failed_connection = True
+ if self.failed_connection:
+ logging.exception("Unable to connect to AMQP server" \
+ " after %d tries. Shutting down." % FLAGS.rabbit_max_retries)
+ sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
"""Wraps the parent fetch with some logic for failed connections"""
@@ -91,11 +108,12 @@ class Consumer(messaging.Consumer):
# refactored into some sort of connection manager object
try:
if self.failed_connection:
- # NOTE(vish): conn is defined in the parent class, we can
+ # NOTE(vish): connection is defined in the parent class, we can
# recreate it as long as we create the backend too
# pylint: disable-msg=W0201
- self.conn = Connection.recreate()
- self.backend = self.conn.create_backend()
+ self.connection = Connection.recreate()
+ self.backend = self.connection.create_backend()
+ self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
logging.error("Reconnected to queue")
@@ -206,6 +224,7 @@ class DirectConsumer(Consumer):
self.routing_key = msg_id
self.exchange = msg_id
self.auto_delete = True
+ self.exclusive = True
super(DirectConsumer, self).__init__(connection=connection)
@@ -262,6 +281,9 @@ def _unpack_context(msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
+ # NOTE(vish): Some versions of python don't like unicode keys
+ # in kwargs.
+ key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
diff --git a/nova/server.py b/nova/server.py
index cb424caa1..a0ee54681 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -42,6 +42,8 @@ flags.DEFINE_bool('daemonize', False, 'daemonize this process')
# clutter.
flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
flags.DEFINE_string('logfile', None, 'log file to output to')
+flags.DEFINE_string('logdir', None, 'directory to keep log files in '
+ '(will be prepended to $logfile)')
flags.DEFINE_string('pidfile', None, 'pid file to output to')
flags.DEFINE_string('working_directory', './', 'working directory...')
flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run')
@@ -119,6 +121,8 @@ def daemonize(args, name, main):
else:
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
+ if FLAGS.logdir:
+ FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
logfile = logging.FileHandler(FLAGS.logfile)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
diff --git a/nova/service.py b/nova/service.py
index d53d92b65..9454d4049 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -17,7 +17,12 @@
# under the License.
"""
-Generic Node baseclass for all workers that run on hosts
+A service is a very thin wrapper around a Manager object. It exposes the
+manager's public methods to other components of the system via rpc. It will
+report state periodically to the database and is responsible for initiating
+any periodic tasks that need to be executed on a given host.
+
+This module contains Service, a generic baseclass for all workers.
"""
import inspect
diff --git a/nova/test.py b/nova/test.py
index 8ef7eca1a..5c2a72819 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -28,7 +28,6 @@ import time
import mox
import stubout
-from tornado import ioloop
from twisted.internet import defer
from twisted.trial import unittest
@@ -159,158 +158,3 @@ class TrialTestCase(unittest.TestCase):
_wrapped.func_name = self.originalAttach.func_name
rpc.Consumer.attach_to_twisted = _wrapped
-
-
-class BaseTestCase(TrialTestCase):
- # TODO(jaypipes): Can this be moved into the TrialTestCase class?
- """Base test case class for all unit tests.
-
- DEPRECATED: This is being removed once Tornado is gone, use TrialTestCase.
- """
- def setUp(self):
- """Run before each test method to initialize test environment"""
- super(BaseTestCase, self).setUp()
- # TODO(termie): we could possibly keep a more global registry of
- # the injected listeners... this is fine for now though
- self.ioloop = ioloop.IOLoop.instance()
-
- self._waiting = None
- self._done_waiting = False
- self._timed_out = False
-
- def _wait_for_test(self, timeout=60):
- """ Push the ioloop along to wait for our test to complete. """
- self._waiting = self.ioloop.add_timeout(time.time() + timeout,
- self._timeout)
-
- def _wait():
-
- """Wrapped wait function. Called on timeout."""
- if self._timed_out:
- self.fail('test timed out')
- self._done()
- if self._done_waiting:
- self.ioloop.stop()
- return
- # we can use add_callback here but this uses less cpu when testing
- self.ioloop.add_timeout(time.time() + 0.01, _wait)
-
- self.ioloop.add_callback(_wait)
- self.ioloop.start()
-
- def _done(self):
- """Callback used for cleaning up deferred test methods."""
- if self._waiting:
- try:
- self.ioloop.remove_timeout(self._waiting)
- except Exception: # pylint: disable-msg=W0703
- # TODO(jaypipes): This produces a pylint warning. Should
- # we really be catching Exception and then passing here?
- pass
- self._waiting = None
- self._done_waiting = True
-
- def _maybe_inline_callbacks(self, func):
- """ If we're doing async calls in our tests, wait on them.
-
- This is probably the most complicated hunk of code we have so far.
-
- First up, if the function is normal (not async) we just act normal
- and return.
-
- Async tests will use the "Inline Callbacks" pattern, which means
- you yield Deferreds at every "waiting" step of your code instead
- of making epic callback chains.
-
- Example (callback chain, ugly):
-
- # A deferred instance
- d = self.compute.terminate_instance(instance_id)
- def _describe(_):
- # Another deferred instance
- d_desc = self.compute.describe_instances()
- return d_desc
- def _checkDescribe(rv):
- self.assertEqual(rv, [])
- d.addCallback(_describe)
- d.addCallback(_checkDescribe)
- d.addCallback(lambda x: self._done())
- self._wait_for_test()
-
- Example (inline callbacks! yay!):
-
- yield self.compute.terminate_instance(instance_id)
- rv = yield self.compute.describe_instances()
- self.assertEqual(rv, [])
-
- If the test fits the Inline Callbacks pattern we will automatically
- handle calling wait and done.
- """
- # TODO(termie): this can be a wrapper function instead and
- # and we can make a metaclass so that we don't
- # have to copy all that "run" code below.
- g = func()
- if not hasattr(g, 'send'):
- self._done()
- return defer.succeed(g)
-
- inlined = defer.inlineCallbacks(func)
- d = inlined()
- return d
-
- def _catch_exceptions(self, result, failure):
- """Catches all exceptions and handles keyboard interrupts."""
- exc = (failure.type, failure.value, failure.getTracebackObject())
- if isinstance(failure.value, self.failureException):
- result.addFailure(self, exc)
- elif isinstance(failure.value, KeyboardInterrupt):
- raise
- else:
- result.addError(self, exc)
-
- self._done()
-
- def _timeout(self):
- """Helper method which trips the timeouts"""
- self._waiting = False
- self._timed_out = True
-
- def run(self, result=None):
- """Runs the test case"""
-
- result.startTest(self)
- test_method = getattr(self, self._testMethodName)
- try:
- try:
- self.setUp()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, sys.exc_info())
- return
-
- ok = False
- try:
- d = self._maybe_inline_callbacks(test_method)
- d.addErrback(lambda x: self._catch_exceptions(result, x))
- d.addBoth(lambda x: self._done() and x)
- self._wait_for_test()
- ok = True
- except self.failureException:
- result.addFailure(self, sys.exc_info())
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, sys.exc_info())
-
- try:
- self.tearDown()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, sys.exc_info())
- ok = False
- if ok:
- result.addSuccess(self)
- finally:
- result.stopTest(self)
diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py
index 46f09e906..9caa8c9d0 100644
--- a/nova/tests/api/__init__.py
+++ b/nova/tests/api/__init__.py
@@ -42,7 +42,7 @@ class Test(unittest.TestCase):
environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain}
environ_keys.update(kwargs)
req = webob.Request.blank(url, environ_keys)
- return req.get_response(api.API())
+ return req.get_response(api.API('ec2'))
def test_openstack(self):
self.stubs.Set(api.openstack, 'API', APIStub)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 7ecb72ab3..639a2ebe4 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -29,13 +29,11 @@ from nova import flags
from nova import exception as exc
import nova.api.openstack.auth
from nova.image import service
-from nova.image.services import glance
+from nova.image import glance
+from nova.tests import fake_flags
from nova.wsgi import Router
-FLAGS = flags.FLAGS
-
-
class Context(object):
pass
@@ -78,7 +76,7 @@ def stub_out_image_service(stubs):
def fake_image_show(meh, id):
return dict(kernelId=1, ramdiskId=1)
- stubs.Set(nova.image.service.LocalImageService, 'show', fake_image_show)
+ stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show)
def stub_out_auth(stubs):
@@ -107,7 +105,6 @@ def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
- FLAGS.FAKE_subdomain = 'api'
def stub_out_glance(stubs, initial_fixtures=[]):
@@ -154,21 +151,19 @@ def stub_out_glance(stubs, initial_fixtures=[]):
self.fixtures = []
fake_parallax_client = FakeParallaxClient(initial_fixtures)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'get_image_index',
+ stubs.Set(nova.image.glance.ParallaxClient, 'get_image_index',
fake_parallax_client.fake_get_image_index)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'get_image_details',
+ stubs.Set(nova.image.glance.ParallaxClient, 'get_image_details',
fake_parallax_client.fake_get_image_details)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'get_image_metadata',
+ stubs.Set(nova.image.glance.ParallaxClient, 'get_image_metadata',
fake_parallax_client.fake_get_image_metadata)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'add_image_metadata',
+ stubs.Set(nova.image.glance.ParallaxClient, 'add_image_metadata',
fake_parallax_client.fake_add_image_metadata)
- stubs.Set(nova.image.services.glance.ParallaxClient,
- 'update_image_metadata',
+ stubs.Set(nova.image.glance.ParallaxClient, 'update_image_metadata',
fake_parallax_client.fake_update_image_metadata)
- stubs.Set(nova.image.services.glance.ParallaxClient,
- 'delete_image_metadata',
+ stubs.Set(nova.image.glance.ParallaxClient, 'delete_image_metadata',
fake_parallax_client.fake_delete_image_metadata)
- stubs.Set(nova.image.services.glance.GlanceImageService, 'delete_all',
+ stubs.Set(nova.image.glance.GlanceImageService, 'delete_all',
fake_parallax_client.fake_delete_all)
diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py
index a8c0ff9f8..dd83991b9 100644
--- a/nova/tests/api/openstack/test_api.py
+++ b/nova/tests/api/openstack/test_api.py
@@ -24,22 +24,28 @@ from nova.api.openstack import API
from nova.api.openstack import faults
from webob import Request
+
class APITest(unittest.TestCase):
def test_exceptions_are_converted_to_faults(self):
+
@webob.dec.wsgify
def succeed(req):
return 'Succeeded'
+
@webob.dec.wsgify
def raise_webob_exc(req):
raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
+
@webob.dec.wsgify
def fail(req):
raise Exception("Threw an exception")
+
@webob.dec.wsgify
def raise_api_fault(req):
exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
return faults.Fault(exc)
+
api = API()
api.application = succeed
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index b63da187f..29f4b8874 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -51,7 +51,7 @@ class Test(unittest.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'herp'
req.headers['X-Auth-Key'] = 'derp'
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
self.assertEqual(result.headers['X-CDN-Management-Url'],
@@ -65,7 +65,7 @@ class Test(unittest.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'herp'
req.headers['X-Auth-Key'] = 'derp'
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
self.assertEqual(result.headers['X-Server-Management-Url'],
@@ -79,7 +79,7 @@ class Test(unittest.TestCase):
fakes.FakeRouter)
req = webob.Request.blank('/v1.0/fake')
req.headers['X-Auth-Token'] = token
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
@@ -103,7 +103,7 @@ class Test(unittest.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-Token'] = 'bacon'
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '401 Unauthorized')
self.assertEqual(self.destroy_called, True)
@@ -111,18 +111,18 @@ class Test(unittest.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'herp'
req.headers['X-Auth-Key'] = 'derp'
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '401 Unauthorized')
def test_no_user(self):
req = webob.Request.blank('/v1.0/')
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '401 Unauthorized')
def test_bad_token(self):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-Token'] = 'baconbaconbacon'
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '401 Unauthorized')
@@ -146,7 +146,7 @@ class TestLimiter(unittest.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'herp'
req.headers['X-Auth-Key'] = 'derp'
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
token = result.headers['X-Auth-Token']
@@ -155,7 +155,7 @@ class TestLimiter(unittest.TestCase):
req = webob.Request.blank('/v1.0/fake')
req.method = 'POST'
req.headers['X-Auth-Token'] = token
- result = req.get_response(nova.api.API())
+ result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 8dd4d1f29..41018afdf 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -39,7 +39,7 @@ class FlavorsTest(unittest.TestCase):
def test_get_flavor_list(self):
req = webob.Request.blank('/v1.0/flavors')
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
def test_get_flavor_by_id(self):
pass
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index d61c3a99b..f610cbf9c 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -28,6 +28,7 @@ import unittest
import stubout
import webob
+from nova import context
from nova import exception
from nova import flags
from nova import utils
@@ -52,12 +53,13 @@ class BaseImageServiceTests(object):
'serverId': None,
'progress': None}
- num_images = len(self.service.index())
+ num_images = len(self.service.index(self.context))
- id = self.service.create(fixture)
+ id = self.service.create(self.context, fixture)
self.assertNotEquals(None, id)
- self.assertEquals(num_images + 1, len(self.service.index()))
+ self.assertEquals(num_images + 1,
+ len(self.service.index(self.context)))
def test_create_and_show_non_existing_image(self):
@@ -68,14 +70,15 @@ class BaseImageServiceTests(object):
'serverId': None,
'progress': None}
- num_images = len(self.service.index())
+ num_images = len(self.service.index(self.context))
- id = self.service.create(fixture)
+ id = self.service.create(self.context, fixture)
self.assertNotEquals(None, id)
self.assertRaises(exception.NotFound,
self.service.show,
+ self.context,
'bad image id')
def test_update(self):
@@ -87,12 +90,12 @@ class BaseImageServiceTests(object):
'serverId': None,
'progress': None}
- id = self.service.create(fixture)
+ id = self.service.create(self.context, fixture)
fixture['status'] = 'in progress'
- self.service.update(id, fixture)
- new_image_data = self.service.show(id)
+ self.service.update(self.context, id, fixture)
+ new_image_data = self.service.show(self.context, id)
self.assertEquals('in progress', new_image_data['status'])
def test_delete(self):
@@ -111,20 +114,20 @@ class BaseImageServiceTests(object):
'serverId': None,
'progress': None}]
- num_images = len(self.service.index())
- self.assertEquals(0, num_images, str(self.service.index()))
+ num_images = len(self.service.index(self.context))
+ self.assertEquals(0, num_images, str(self.service.index(self.context)))
ids = []
for fixture in fixtures:
- new_id = self.service.create(fixture)
+ new_id = self.service.create(self.context, fixture)
ids.append(new_id)
- num_images = len(self.service.index())
- self.assertEquals(2, num_images, str(self.service.index()))
+ num_images = len(self.service.index(self.context))
+ self.assertEquals(2, num_images, str(self.service.index(self.context)))
- self.service.delete(ids[0])
+ self.service.delete(self.context, ids[0])
- num_images = len(self.service.index())
+ num_images = len(self.service.index(self.context))
self.assertEquals(1, num_images)
@@ -135,8 +138,9 @@ class LocalImageServiceTest(unittest.TestCase,
def setUp(self):
self.stubs = stubout.StubOutForTesting()
- service_class = 'nova.image.service.LocalImageService'
+ service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
+ self.context = context.RequestContext(None, None)
def tearDown(self):
self.service.delete_all()
@@ -151,8 +155,9 @@ class GlanceImageServiceTest(unittest.TestCase,
def setUp(self):
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_glance(self.stubs)
- service_class = 'nova.image.services.glance.GlanceImageService'
+ service_class = 'nova.image.glance.GlanceImageService'
self.service = utils.import_object(service_class)
+ self.context = context.RequestContext(None, None)
self.service.delete_all()
def tearDown(self):
@@ -187,7 +192,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
def setUp(self):
self.orig_image_service = FLAGS.image_service
- FLAGS.image_service = 'nova.image.services.glance.GlanceImageService'
+ FLAGS.image_service = 'nova.image.glance.GlanceImageService'
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
@@ -203,7 +208,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
def test_get_image_index(self):
req = webob.Request.blank('/v1.0/images')
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
res_dict = json.loads(res.body)
fixture_index = [dict(id=f['id'], name=f['name']) for f
@@ -215,7 +220,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
def test_get_image_details(self):
req = webob.Request.blank('/v1.0/images/detail')
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
res_dict = json.loads(res.body)
for image in res_dict['images']:
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 55efcf733..8cfc6c45a 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -69,14 +69,14 @@ class ServersTest(unittest.TestCase):
def test_get_server_by_id(self):
req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
res_dict = json.loads(res.body)
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
def test_get_server_list(self):
req = webob.Request.blank('/v1.0/servers')
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
res_dict = json.loads(res.body)
i = 0
@@ -91,9 +91,7 @@ class ServersTest(unittest.TestCase):
pass
def instance_create(context, inst):
- class Foo(object):
- internal_id = 1
- return Foo()
+ return {'id': 1, 'internal_id': 1}
def fake_method(*args, **kwargs):
pass
@@ -121,14 +119,14 @@ class ServersTest(unittest.TestCase):
req.method = 'POST'
req.body = json.dumps(body)
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status_int, 200)
def test_update_no_body(self):
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status_int, 422)
def test_update_bad_params(self):
@@ -147,7 +145,7 @@ class ServersTest(unittest.TestCase):
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
req.body = self.body
- req.get_response(nova.api.API())
+ req.get_response(nova.api.API('os'))
def test_update_server(self):
inst_dict = dict(name='server_test', adminPass='bacon')
@@ -163,28 +161,28 @@ class ServersTest(unittest.TestCase):
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
req.body = self.body
- req.get_response(nova.api.API())
+ req.get_response(nova.api.API('os'))
def test_create_backup_schedules(self):
req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
req.method = 'POST'
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status, '404 Not Found')
def test_delete_backup_schedules(self):
req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
req.method = 'DELETE'
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status, '404 Not Found')
def test_get_server_backup_schedules(self):
req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status, '404 Not Found')
def test_get_all_server_details(self):
req = webob.Request.blank('/v1.0/servers/detail')
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
res_dict = json.loads(res.body)
i = 0
@@ -202,7 +200,7 @@ class ServersTest(unittest.TestCase):
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
def test_server_rebuild(self):
body = dict(server=dict(
@@ -212,7 +210,7 @@ class ServersTest(unittest.TestCase):
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
def test_server_resize(self):
body = dict(server=dict(
@@ -222,7 +220,7 @@ class ServersTest(unittest.TestCase):
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
def test_delete_server_instance(self):
req = webob.Request.blank('/v1.0/servers/1')
@@ -236,7 +234,7 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.db.api, 'instance_destroy',
instance_destroy_mock)
- res = req.get_response(nova.api.API())
+ res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status, '202 Accepted')
self.assertEqual(self.server_delete_called, True)
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index 0b1c3e353..33d4cb294 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -34,10 +34,6 @@ from nova.api.ec2 import apirequest
from nova.auth import manager
-FLAGS = flags.FLAGS
-FLAGS.FAKE_subdomain = 'ec2'
-
-
class FakeHttplibSocket(object):
"""a fake socket implementation for httplib.HTTPResponse, trivial"""
def __init__(self, response_string):
@@ -83,7 +79,7 @@ class FakeHttplibConnection(object):
pass
-class XmlConversionTestCase(test.BaseTestCase):
+class XmlConversionTestCase(test.TrialTestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
conv = apirequest._try_convert
@@ -100,7 +96,7 @@ class XmlConversionTestCase(test.BaseTestCase):
self.assertEqual(conv('-0'), 0)
-class ApiEc2TestCase(test.BaseTestCase):
+class ApiEc2TestCase(test.TrialTestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
@@ -109,7 +105,7 @@ class ApiEc2TestCase(test.BaseTestCase):
self.host = '127.0.0.1'
- self.app = api.API()
+ self.app = api.API('ec2')
def expect_http(self, host=None, is_secure=False):
"""Returns a new EC2 connection"""
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 2d61d2675..9886a2449 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -91,6 +91,41 @@ class CloudTestCase(test.TrialTestCase):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user.id, name)
+ def test_describe_addresses(self):
+ """Makes sure describe addresses runs without raising an exception"""
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'host': FLAGS.host})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ greenthread.sleep(0.3)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_associate_disassociate_address(self):
+ """Verifies associate runs cleanly without raising an exception"""
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'host': FLAGS.host})
+ self.cloud.allocate_address(self.context)
+ inst = db.instance_create(self.context, {})
+ fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
+ ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+ self.cloud.associate_address(self.context,
+ instance_id=ec2_id,
+ public_ip=address)
+ self.cloud.disassociate_address(self.context,
+ public_ip=address)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ greenthread.sleep(0.3)
+ self.network.deallocate_fixed_ip(self.context, fixed)
+ db.instance_destroy(self.context, inst['id'])
+ db.floating_ip_destroy(self.context, address)
+
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index 01b5651df..71a1a4457 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -66,6 +66,27 @@ class ComputeTestCase(test.TrialTestCase):
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
+ def test_create_instance_associates_security_groups(self):
+ """Make sure create_instance associates security groups"""
+ inst = {}
+ inst['user_id'] = self.user.id
+ inst['project_id'] = self.project.id
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': self.user.id,
+ 'project_id': self.project.id}
+ group = db.security_group_create(self.context, values)
+ ref = self.compute.create_instance(self.context,
+ security_groups=[group['id']],
+ **inst)
+ # reload to get groups
+ instance_ref = db.instance_get(self.context, ref['id'])
+ try:
+ self.assertEqual(len(instance_ref['security_groups']), 1)
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+ db.instance_destroy(self.context, instance_ref['id'])
+
@defer.inlineCallbacks
def test_run_terminate(self):
"""Make sure it is possible to run and terminate instance"""
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 4bbef8832..7376a11dd 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -21,9 +21,10 @@ from nova import flags
FLAGS = flags.FLAGS
flags.DECLARE('volume_driver', 'nova.volume.manager')
-FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
+FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
FLAGS.connection_type = 'fake'
FLAGS.fake_rabbit = True
+flags.DECLARE('auth_driver', 'nova.auth.manager')
FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
@@ -31,9 +32,11 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS.network_size = 16
FLAGS.num_networks = 5
FLAGS.fake_network = True
-flags.DECLARE('num_shelves', 'nova.volume.manager')
-flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
+flags.DECLARE('num_shelves', 'nova.volume.driver')
+flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
+flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
+FLAGS.iscsi_num_targets = 8
FLAGS.verbose = True
FLAGS.sql_connection = 'sqlite:///nova.sqlite'
diff --git a/nova/tests/misc_unittest.py b/nova/tests/misc_unittest.py
new file mode 100644
index 000000000..856060afa
--- /dev/null
+++ b/nova/tests/misc_unittest.py
@@ -0,0 +1,48 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import subprocess
+
+from nova import test
+from nova.utils import parse_mailmap, str_dict_replace
+
+
+class ProjectTestCase(test.TrialTestCase):
+ def test_authors_up_to_date(self):
+ if os.path.exists('../.bzr'):
+ log_cmd = subprocess.Popen(["bzr", "log", "-n0"],
+ stdout=subprocess.PIPE)
+ changelog = log_cmd.communicate()[0]
+ mailmap = parse_mailmap('../.mailmap')
+
+ contributors = set()
+ for l in changelog.split('\n'):
+ l = l.strip()
+ if (l.startswith('author:') or l.startswith('committer:')
+ and not l == 'committer: Tarmac'):
+ email = l.split(' ')[-1]
+ contributors.add(str_dict_replace(email, mailmap))
+
+ authors_file = open('../Authors', 'r').read()
+
+ missing = set()
+ for contributor in contributors:
+ if not contributor in authors_file:
+ missing.add(contributor)
+
+ self.assertTrue(len(missing) == 0,
+ '%r not listed in Authors' % missing)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index b7caed4fd..6f4705719 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -41,7 +41,6 @@ class NetworkTestCase(test.TrialTestCase):
# flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_network=True,
- auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
network_size=16,
num_networks=5)
logging.getLogger().setLevel(logging.DEBUG)
@@ -127,6 +126,7 @@ class NetworkTestCase(test.TrialTestCase):
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
release_ip(fix_addr)
+ db.floating_ip_destroy(context.get_admin_context(), float_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py
index 9e3afbf4e..b7c1d2acc 100644
--- a/nova/tests/quota_unittest.py
+++ b/nova/tests/quota_unittest.py
@@ -138,11 +138,8 @@ class QuotaTestCase(test.TrialTestCase):
def test_too_many_addresses(self):
address = '192.168.0.100'
- try:
- db.floating_ip_get_by_address(context.get_admin_context(), address)
- except exception.NotFound:
- db.floating_ip_create(context.get_admin_context(),
- {'address': address, 'host': FLAGS.host})
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address, 'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.project.id)
# NOTE(vish): This assert never fails. When cloud attempts to
@@ -151,3 +148,4 @@ class QuotaTestCase(test.TrialTestCase):
# that is breaking.
self.assertRaises(cloud.QuotaError, self.cloud.allocate_address,
self.context)
+ db.floating_ip_destroy(context.get_admin_context(), address)
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py
index 27345d055..cb5fe6b9c 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/scheduler_unittest.py
@@ -81,7 +81,7 @@ class SimpleDriverTestCase(test.TrialTestCase):
max_cores=4,
max_gigabytes=4,
network_manager='nova.network.manager.FlatManager',
- volume_driver='nova.volume.driver.FakeAOEDriver',
+ volume_driver='nova.volume.driver.FakeISCSIDriver',
scheduler_driver='nova.scheduler.simple.SimpleScheduler')
self.scheduler = manager.SchedulerManager()
self.manager = auth_manager.AuthManager()
diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py
index e74e0f726..a268bc4fe 100644
--- a/nova/tests/service_unittest.py
+++ b/nova/tests/service_unittest.py
@@ -23,8 +23,8 @@ Unit Tests for remote procedure calls using queue
import mox
from twisted.application.app import startApplication
+from twisted.internet import defer
-from nova import context
from nova import exception
from nova import flags
from nova import rpc
@@ -48,7 +48,7 @@ class ExtendedService(service.Service):
return 'service'
-class ServiceManagerTestCase(test.BaseTestCase):
+class ServiceManagerTestCase(test.TrialTestCase):
"""Test cases for Services"""
def test_attribute_error_for_no_manager(self):
@@ -75,13 +75,12 @@ class ServiceManagerTestCase(test.BaseTestCase):
self.assertEqual(serv.test_method(), 'service')
-class ServiceTestCase(test.BaseTestCase):
+class ServiceTestCase(test.TrialTestCase):
"""Test cases for Services"""
def setUp(self):
super(ServiceTestCase, self).setUp()
self.mox.StubOutWithMock(service, 'db')
- self.context = context.get_admin_context()
def test_create(self):
host = 'foo'
@@ -144,87 +143,103 @@ class ServiceTestCase(test.BaseTestCase):
# whether it is disconnected, it looks for a variable on itself called
# 'model_disconnected' and report_state doesn't really do much so this
# these are mostly just for coverage
- def test_report_state(self):
- host = 'foo'
- binary = 'bar'
- service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
- service.db.__getattr__('report_state')
- service.db.service_get_by_args(self.context,
- host,
- binary).AndReturn(service_ref)
- service.db.service_update(self.context, service_ref['id'],
- mox.ContainsKeyValue('report_count', 1))
-
- self.mox.ReplayAll()
- s = service.Service()
- rv = yield s.report_state(host, binary)
-
+ @defer.inlineCallbacks
def test_report_state_no_service(self):
host = 'foo'
binary = 'bar'
+ topic = 'test'
service_create = {'host': host,
'binary': binary,
+ 'topic': topic,
'report_count': 0}
service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'id': 1}
- service.db.__getattr__('report_state')
- service.db.service_get_by_args(self.context,
+ service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
- service.db.service_create(self.context,
+ service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
- service.db.service_get(self.context,
+ service.db.service_get(mox.IgnoreArg(),
service_ref['id']).AndReturn(service_ref)
- service.db.service_update(self.context, service_ref['id'],
+ service.db.service_update(mox.IgnoreArg(), service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
- s = service.Service()
- rv = yield s.report_state(host, binary)
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.tests.service_unittest.FakeManager')
+ serv.startService()
+ yield serv.report_state()
+ @defer.inlineCallbacks
def test_report_state_newly_disconnected(self):
host = 'foo'
binary = 'bar'
+ topic = 'test'
+ service_create = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0}
service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'id': 1}
- service.db.__getattr__('report_state')
- service.db.service_get_by_args(self.context,
- host,
- binary).AndRaise(Exception())
+ service.db.service_get_by_args(mox.IgnoreArg(),
+ host,
+ binary).AndRaise(exception.NotFound())
+ service.db.service_create(mox.IgnoreArg(),
+ service_create).AndReturn(service_ref)
+ service.db.service_get(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(Exception())
self.mox.ReplayAll()
- s = service.Service()
- rv = yield s.report_state(host, binary)
-
- self.assert_(s.model_disconnected)
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.tests.service_unittest.FakeManager')
+ serv.startService()
+ yield serv.report_state()
+ self.assert_(serv.model_disconnected)
+ @defer.inlineCallbacks
def test_report_state_newly_connected(self):
host = 'foo'
binary = 'bar'
+ topic = 'test'
+ service_create = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0}
service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'id': 1}
- service.db.__getattr__('report_state')
- service.db.service_get_by_args(self.context,
- host,
- binary).AndReturn(service_ref)
- service.db.service_update(self.context, service_ref['id'],
+ service.db.service_get_by_args(mox.IgnoreArg(),
+ host,
+ binary).AndRaise(exception.NotFound())
+ service.db.service_create(mox.IgnoreArg(),
+ service_create).AndReturn(service_ref)
+ service.db.service_get(mox.IgnoreArg(),
+ service_ref['id']).AndReturn(service_ref)
+ service.db.service_update(mox.IgnoreArg(), service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
- s = service.Service()
- s.model_disconnected = True
- rv = yield s.report_state(host, binary)
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.tests.service_unittest.FakeManager')
+ serv.startService()
+ serv.model_disconnected = True
+ yield serv.report_state()
- self.assert_(not s.model_disconnected)
+ self.assert_(not serv.model_disconnected)
diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py
index ce78d450c..d49383fb7 100644
--- a/nova/tests/virt_unittest.py
+++ b/nova/tests/virt_unittest.py
@@ -91,7 +91,7 @@ class LibvirtConnTestCase(test.TrialTestCase):
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
- uri, template = conn.get_uri_and_template()
+ uri, _template, _rescue = conn.get_uri_and_templates()
self.assertEquals(uri, expected_uri)
xml = conn.to_xml(instance_ref)
@@ -114,7 +114,7 @@ class LibvirtConnTestCase(test.TrialTestCase):
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
- uri, template = conn.get_uri_and_template()
+ uri, _template, _rescue = conn.get_uri_and_templates()
self.assertEquals(uri, testuri)
def tearDown(self):
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index fdee30b48..12321a96f 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -16,7 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Tests for Volume Code
+Tests for Volume Code.
+
"""
import logging
@@ -33,7 +34,8 @@ FLAGS = flags.FLAGS
class VolumeTestCase(test.TrialTestCase):
- """Test Case for volumes"""
+ """Test Case for volumes."""
+
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
@@ -44,7 +46,7 @@ class VolumeTestCase(test.TrialTestCase):
@staticmethod
def _create_volume(size='0'):
- """Create a volume object"""
+ """Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
@@ -56,7 +58,7 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_create_delete_volume(self):
- """Test volume can be created and deleted"""
+ """Test volume can be created and deleted."""
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)
self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
@@ -70,7 +72,7 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_too_big_volume(self):
- """Ensure failure if a too large of a volume is requested"""
+ """Ensure failure if a too large of a volume is requested."""
# FIXME(vish): validation needs to move into the data layer in
# volume_create
defer.returnValue(True)
@@ -83,9 +85,9 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_too_many_volumes(self):
- """Ensure that NoMoreBlades is raised when we run out of volumes"""
+ """Ensure that NoMoreTargets is raised when we run out of volumes."""
vols = []
- total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+ total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)
@@ -93,14 +95,14 @@ class VolumeTestCase(test.TrialTestCase):
volume_id = self._create_volume()
self.assertFailure(self.volume.create_volume(self.context,
volume_id),
- db.NoMoreBlades)
+ db.NoMoreTargets)
db.volume_destroy(context.get_admin_context(), volume_id)
for volume_id in vols:
yield self.volume.delete_volume(self.context, volume_id)
@defer.inlineCallbacks
def test_run_attach_detach_volume(self):
- """Make sure volume can be attached and detached from instance"""
+ """Make sure volume can be attached and detached from instance."""
inst = {}
inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
@@ -148,23 +150,22 @@ class VolumeTestCase(test.TrialTestCase):
db.instance_destroy(self.context, instance_id)
@defer.inlineCallbacks
- def test_concurrent_volumes_get_different_blades(self):
- """Ensure multiple concurrent volumes get different blades"""
+ def test_concurrent_volumes_get_different_targets(self):
+ """Ensure multiple concurrent volumes get different targets."""
volume_ids = []
- shelf_blades = []
+ targets = []
def _check(volume_id):
- """Make sure blades aren't duplicated"""
+ """Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
- (shelf_id, blade_id) = db.volume_get_shelf_and_blade(admin_context,
- volume_id)
- shelf_blade = '%s.%s' % (shelf_id, blade_id)
- self.assert_(shelf_blade not in shelf_blades)
- shelf_blades.append(shelf_blade)
- logging.debug("Blade %s allocated", shelf_blade)
+ iscsi_target = db.volume_get_iscsi_target_num(admin_context,
+ volume_id)
+ self.assert_(iscsi_target not in targets)
+ targets.append(iscsi_target)
+ logging.debug("Target %s allocated", iscsi_target)
deferreds = []
- total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+ total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
d = self.volume.create_volume(self.context, volume_id)
diff --git a/nova/twistd.py b/nova/twistd.py
index 3ec0ff61e..cb5648ce6 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -43,6 +43,8 @@ else:
FLAGS = flags.FLAGS
+flags.DEFINE_string('logdir', None, 'directory to keep log files in '
+ '(will be prepended to $logfile)')
class TwistdServerOptions(ServerOptions):
@@ -246,6 +248,8 @@ def serve(filename):
FLAGS.logfile = '%s.log' % name
elif FLAGS.logfile.endswith('twistd.log'):
FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
+ if FLAGS.logdir:
+ FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
if not FLAGS.prefix:
FLAGS.prefix = name
elif FLAGS.prefix.endswith('twisted'):
diff --git a/nova/utils.py b/nova/utils.py
index e58302c11..142584df8 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -21,6 +21,7 @@ System-level utilities and helper functions.
"""
import datetime
+import functools
import inspect
import logging
import os
@@ -132,13 +133,9 @@ def runthis(prompt, cmd, check_exit_code=True):
def generate_uid(topic, size=8):
- if topic == "i":
- # Instances have integer internal ids.
- return random.randint(0, 2 ** 32 - 1)
- else:
- characters = '01234567890abcdefghijklmnopqrstuvwxyz'
- choices = [random.choice(characters) for x in xrange(size)]
- return '%s-%s' % (topic, ''.join(choices))
+ characters = '01234567890abcdefghijklmnopqrstuvwxyz'
+ choices = [random.choice(characters) for x in xrange(size)]
+ return '%s-%s' % (topic, ''.join(choices))
def generate_mac():
@@ -158,8 +155,8 @@ def get_my_ip():
if getattr(FLAGS, 'fake_tests', None):
return '127.0.0.1'
try:
- csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- csock.connect(('www.google.com', 80))
+ csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
@@ -178,6 +175,24 @@ def parse_isotime(timestr):
return datetime.datetime.strptime(timestr, TIME_FORMAT)
+def parse_mailmap(mailmap='.mailmap'):
+ mapping = {}
+ if os.path.exists(mailmap):
+ fp = open(mailmap, 'r')
+ for l in fp:
+ l = l.strip()
+ if not l.startswith('#') and ' ' in l:
+ canonical_email, alias = l.split(' ')
+ mapping[alias] = canonical_email
+ return mapping
+
+
+def str_dict_replace(s, mapping):
+ for s1, s2 in mapping.iteritems():
+ s = s.replace(s1, s2)
+ return s
+
+
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
@@ -217,10 +232,10 @@ def deferredToThread(f):
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
-
+
Code is directly from the utf8 function in
http://github.com/facebook/tornado/blob/master/tornado/escape.py
-
+
"""
return saxutils.escape(value, {'"': "&quot;"})
@@ -236,4 +251,3 @@ def utf8(value):
return value.encode("utf-8")
assert isinstance(value, str)
return value
-
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 34e37adf7..11f0fa8ce 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Abstraction of the underlying virtualization API"""
+"""Abstraction of the underlying virtualization API."""
import logging
import sys
@@ -32,13 +32,26 @@ FLAGS = flags.FLAGS
def get_connection(read_only=False):
- """Returns an object representing the connection to a virtualization
- platform. This could be nova.virt.fake.FakeConnection in test mode,
- a connection to KVM or QEMU via libvirt, or a connection to XenServer
- or Xen Cloud Platform via XenAPI.
+ """
+ Returns an object representing the connection to a virtualization
+ platform.
+
+ This could be :mod:`nova.virt.fake.FakeConnection` in test mode,
+ a connection to KVM, QEMU, or UML via :mod:`libvirt_conn`, or a connection
+ to XenServer or Xen Cloud Platform via :mod:`xenapi`.
Any object returned here must conform to the interface documented by
- FakeConnection.
+ :mod:`FakeConnection`.
+
+ **Related flags**
+
+ :connection_type: A string literal that falls through a if/elif structure
+ to determine what virtualization mechanism to use.
+ Values may be
+
+ * fake
+ * libvirt
+ * xenapi
"""
# TODO(termie): maybe lazy load after initial check for permissions
# TODO(termie): check whether we can be disconnected
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index eaa2261f5..f855523d3 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -18,14 +18,16 @@
# under the License.
"""
-A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor.
-This module also documents the semantics of real hypervisor connections.
-"""
+A fake (in-memory) hypervisor+api.
+
+Allows nova testing w/o a hypervisor. This module also documents the
+semantics of real hypervisor connections.
-import logging
+"""
from twisted.internet import defer
+from nova import exception
from nova.compute import power_state
@@ -119,6 +121,18 @@ class FakeConnection(object):
"""
return defer.succeed(None)
+ def rescue(self, instance):
+ """
+ Rescue the specified instance.
+ """
+ return defer.succeed(None)
+
+ def unrescue(self, instance):
+ """
+ Unrescue the specified instance.
+ """
+ return defer.succeed(None)
+
def destroy(self, instance):
"""
Destroy (shutdown and delete) the specified instance.
@@ -148,7 +162,12 @@ class FakeConnection(object):
current memory the instance has, in KiB, 'num_cpu': The current number
of virtual CPUs the instance has, 'cpu_time': The total CPU time used
by the instance, in nanoseconds.
+
+ This method should raise exception.NotFound if the hypervisor has no
+ knowledge of the instance
"""
+ if instance_name not in self.instances:
+ raise exception.NotFound("Instance %s Not Found" % instance_name)
i = self.instances[instance_name]
return {'state': i._state,
'max_mem': 0,
diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template
new file mode 100644
index 000000000..c0ffbdcee
--- /dev/null
+++ b/nova/virt/libvirt.rescue.qemu.xml.template
@@ -0,0 +1,37 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <os>
+ <type>hvm</type>
+ <kernel>%(basepath)s/rescue-kernel</kernel>
+ <initrd>%(basepath)s/rescue-ramdisk</initrd>
+ <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <memory>%(memory_kb)s</memory>
+ <vcpu>%(vcpus)s</vcpu>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/rescue-disk'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ <!-- <model type='virtio'/> CANT RUN virtio network right now -->
+ <filterref filter="nova-instance-%(name)s">
+ <parameter name="IP" value="%(ip_address)s" />
+ <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+ </filterref>
+ </interface>
+ <serial type="file">
+ <source path='%(basepath)s/console.log'/>
+ <target port='1'/>
+ </serial>
+ </devices>
+</domain>
diff --git a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template
new file mode 100644
index 000000000..836f47532
--- /dev/null
+++ b/nova/virt/libvirt.rescue.uml.xml.template
@@ -0,0 +1,26 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <memory>%(memory_kb)s</memory>
+ <os>
+ <type>%(type)s</type>
+ <kernel>/usr/bin/linux</kernel>
+ <root>/dev/ubda1</root>
+ </os>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/rescue-disk'/>
+ <target dev='ubd0' bus='uml'/>
+ </disk>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='ubd1' bus='uml'/>
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ </interface>
+ <console type="file">
+ <source path='%(basepath)s/console.log'/>
+ </console>
+ </devices>
+</domain>
diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template
new file mode 100644
index 000000000..3b8d27237
--- /dev/null
+++ b/nova/virt/libvirt.rescue.xen.xml.template
@@ -0,0 +1,34 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <os>
+ <type>linux</type>
+ <kernel>%(basepath)s/kernel</kernel>
+ <initrd>%(basepath)s/ramdisk</initrd>
+ <root>/dev/xvda1</root>
+ <cmdline>ro</cmdline>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <memory>%(memory_kb)s</memory>
+ <vcpu>%(vcpus)s</vcpu>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/rescue-disk'/>
+ <target dev='sda' />
+ </disk>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='sdb' />
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ </interface>
+ <console type="file">
+ <source path='%(basepath)s/console.log'/>
+ <target port='1'/>
+ </console>
+ </devices>
+</domain>
+
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 509ed97a0..18085089f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -18,7 +18,27 @@
# under the License.
"""
-A connection to a hypervisor (e.g. KVM) through libvirt.
+A connection to a hypervisor through libvirt.
+
+Supports KVM, QEMU, UML, and XEN.
+
+**Related Flags**
+
+:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
+ (default: kvm).
+:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
+:libvirt_xml_template: Libvirt XML Template (QEmu/KVM).
+:libvirt_xen_xml_template: Libvirt XML Template (Xen).
+:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux).
+:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU).
+:libvirt_rescue_xen_xml_template: XML template for rescue mode (XEN).
+:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML).
+:rescue_image_id: Rescue ami image (default: ami-rescue).
+:rescue_kernel_id: Rescue aki image (default: aki-rescue).
+:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
+:injected_network_template: Template file for injected network
+:allow_project_net_traffic: Whether to allow in project network traffic
+
"""
import logging
@@ -48,6 +68,19 @@ libxml2 = None
FLAGS = flags.FLAGS
+flags.DEFINE_string('libvirt_rescue_xml_template',
+ utils.abspath('virt/libvirt.rescue.qemu.xml.template'),
+ 'Libvirt RESCUE XML Template for QEmu/KVM')
+flags.DEFINE_string('libvirt_rescue_xen_xml_template',
+ utils.abspath('virt/libvirt.rescue.xen.xml.template'),
+ 'Libvirt RESCUE XML Template for xen')
+flags.DEFINE_string('libvirt_rescue_uml_xml_template',
+ utils.abspath('virt/libvirt.rescue.uml.xml.template'),
+ 'Libvirt RESCUE XML Template for user-mode-linux')
+# TODO(vish): These flags should probably go into a shared location
+flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
+flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
+flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.qemu.xml.template'),
'Libvirt XML Template for QEmu/KVM')
@@ -87,9 +120,12 @@ def get_connection(read_only):
class LibvirtConnection(object):
def __init__(self, read_only):
- self.libvirt_uri, template_file = self.get_uri_and_template()
+ (self.libvirt_uri,
+ template_file,
+ rescue_file) = self.get_uri_and_templates()
self.libvirt_xml = open(template_file).read()
+ self.rescue_xml = open(rescue_file).read()
self._wrapped_conn = None
self.read_only = read_only
@@ -112,17 +148,20 @@ class LibvirtConnection(object):
return False
raise
- def get_uri_and_template(self):
+ def get_uri_and_templates(self):
if FLAGS.libvirt_type == 'uml':
uri = FLAGS.libvirt_uri or 'uml:///system'
template_file = FLAGS.libvirt_uml_xml_template
+ rescue_file = FLAGS.libvirt_rescue_uml_xml_template
elif FLAGS.libvirt_type == 'xen':
uri = FLAGS.libvirt_uri or 'xen:///'
template_file = FLAGS.libvirt_xen_xml_template
+ rescue_file = FLAGS.libvirt_rescue_xen_xml_template
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
template_file = FLAGS.libvirt_xml_template
- return uri, template_file
+ rescue_file = FLAGS.libvirt_rescue_xml_template
+ return uri, template_file, rescue_file
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
@@ -138,7 +177,7 @@ class LibvirtConnection(object):
return [self._conn.lookupByID(x).name()
for x in self._conn.listDomainsID()]
- def destroy(self, instance):
+ def destroy(self, instance, cleanup=True):
try:
virt_dom = self._conn.lookupByName(instance['name'])
virt_dom.destroy()
@@ -146,10 +185,11 @@ class LibvirtConnection(object):
pass
# If the instance is already terminated, we're still happy
d = defer.Deferred()
- d.addCallback(lambda _: self._cleanup(instance))
+ if cleanup:
+ d.addCallback(lambda _: self._cleanup(instance))
# FIXME: What does this comment mean?
# TODO(termie): short-circuit me for tests
- # WE'LL save this for when we do shutdown,
+ # WE'LL save this for when we do shutdown,
# instead of destroy - but destroy returns immediately
timer = task.LoopingCall(f=None)
@@ -182,25 +222,52 @@ class LibvirtConnection(object):
@defer.inlineCallbacks
@exception.wrap_exception
def attach_volume(self, instance_name, device_path, mountpoint):
- yield process.simple_execute("sudo virsh attach-disk %s %s %s" %
- (instance_name,
- device_path,
- mountpoint.rpartition('/dev/')[2]))
+ virt_dom = self._conn.lookupByName(instance_name)
+ mount_device = mountpoint.rpartition("/")[2]
+ xml = """<disk type='block'>
+ <driver name='qemu' type='raw'/>
+ <source dev='%s'/>
+ <target dev='%s' bus='virtio'/>
+ </disk>""" % (device_path, mount_device)
+ virt_dom.attachDevice(xml)
+ yield
+
+ def _get_disk_xml(self, xml, device):
+ """Returns the xml for the disk mounted at device"""
+ try:
+ doc = libxml2.parseDoc(xml)
+ except:
+ return None
+ ctx = doc.xpathNewContext()
+ try:
+ ret = ctx.xpathEval('/domain/devices/disk')
+ for node in ret:
+ for child in node.children:
+ if child.name == 'target':
+ if child.prop('dev') == device:
+ return str(node)
+ finally:
+ if ctx != None:
+ ctx.xpathFreeContext()
+ if doc != None:
+ doc.freeDoc()
@defer.inlineCallbacks
@exception.wrap_exception
def detach_volume(self, instance_name, mountpoint):
- # NOTE(vish): despite the documentation, virsh detach-disk just
- # wants the device name without the leading /dev/
- yield process.simple_execute("sudo virsh detach-disk %s %s" %
- (instance_name,
- mountpoint.rpartition('/dev/')[2]))
+ virt_dom = self._conn.lookupByName(instance_name)
+ mount_device = mountpoint.rpartition("/")[2]
+ xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
+ if not xml:
+ raise exception.NotFound("No disk at %s" % mount_device)
+ virt_dom.detachDevice(xml)
+ yield
@defer.inlineCallbacks
@exception.wrap_exception
def reboot(self, instance):
+ yield self.destroy(instance, False)
xml = self.to_xml(instance)
- yield self._conn.lookupByName(instance['name']).destroy()
yield self._conn.createXML(xml, 0)
d = defer.Deferred()
@@ -229,6 +296,48 @@ class LibvirtConnection(object):
@defer.inlineCallbacks
@exception.wrap_exception
+ def rescue(self, instance):
+ yield self.destroy(instance, False)
+
+ xml = self.to_xml(instance, rescue=True)
+ rescue_images = {'image_id': FLAGS.rescue_image_id,
+ 'kernel_id': FLAGS.rescue_kernel_id,
+ 'ramdisk_id': FLAGS.rescue_ramdisk_id}
+ yield self._create_image(instance, xml, 'rescue-', rescue_images)
+ yield self._conn.createXML(xml, 0)
+
+ d = defer.Deferred()
+ timer = task.LoopingCall(f=None)
+
+ def _wait_for_rescue():
+ try:
+ state = self.get_info(instance['name'])['state']
+ db.instance_set_state(None, instance['id'], state)
+ if state == power_state.RUNNING:
+ logging.debug('instance %s: rescued', instance['name'])
+ timer.stop()
+ d.callback(None)
+ except Exception, exn:
+ logging.error('_wait_for_rescue failed: %s', exn)
+ db.instance_set_state(None,
+ instance['id'],
+ power_state.SHUTDOWN)
+ timer.stop()
+ d.callback(None)
+
+ timer.f = _wait_for_rescue
+ timer.start(interval=0.5, now=True)
+ yield d
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def unrescue(self, instance):
+ # NOTE(vish): Because reboot destroys and recreates an instance using
+ # the normal xml file, we can just call reboot here
+ yield self.reboot(instance)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
def spawn(self, instance):
xml = self.to_xml(instance)
db.instance_set_state(context.get_admin_context(),
@@ -239,8 +348,6 @@ class LibvirtConnection(object):
setup_nwfilters_for_instance(instance)
yield self._create_image(instance, xml)
yield self._conn.createXML(xml, 0)
- # TODO(termie): this should actually register
- # a callback to check for successful boot
logging.debug("instance %s: is running", instance['name'])
local_d = defer.Deferred()
@@ -311,15 +418,16 @@ class LibvirtConnection(object):
return d
@defer.inlineCallbacks
- def _create_image(self, inst, libvirt_xml):
+ def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
# syntactic nicety
- basepath = lambda fname='': os.path.join(FLAGS.instances_path,
+ basepath = lambda fname = '', prefix = prefix: os.path.join(
+ FLAGS.instances_path,
inst['name'],
- fname)
+ prefix + fname)
# ensure directories exist and are writable
- yield process.simple_execute('mkdir -p %s' % basepath())
- yield process.simple_execute('chmod 0777 %s' % basepath())
+ yield process.simple_execute('mkdir -p %s' % basepath(prefix=''))
+ yield process.simple_execute('chmod 0777 %s' % basepath(prefix=''))
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
@@ -328,12 +436,17 @@ class LibvirtConnection(object):
f.write(libvirt_xml)
f.close()
- os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY,
- 0660))
+    # NOTE(vish): No need to add the prefix to console.log
+ os.close(os.open(basepath('console.log', ''),
+ os.O_CREAT | os.O_WRONLY, 0660))
user = manager.AuthManager().get_user(inst['user_id'])
project = manager.AuthManager().get_project(inst['project_id'])
+ if not disk_images:
+ disk_images = {'image_id': inst['image_id'],
+ 'kernel_id': inst['kernel_id'],
+ 'ramdisk_id': inst['ramdisk_id']}
if not os.path.exists(basepath('disk')):
yield images.fetch(inst.image_id, basepath('disk-raw'), user,
project)
@@ -344,7 +457,7 @@ class LibvirtConnection(object):
yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
project)
- execute = lambda cmd, process_input=None, check_exit_code=True: \
+ execute = lambda cmd, process_input = None, check_exit_code = True: \
process.simple_execute(cmd=cmd,
process_input=process_input,
check_exit_code=check_exit_code)
@@ -379,7 +492,9 @@ class LibvirtConnection(object):
['local_gb']
* 1024 * 1024 * 1024)
- resize = inst['instance_type'] != 'm1.tiny'
+ resize = True
+ if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
+ resize = False
yield disk.partition(basepath('disk-raw'), basepath('disk'),
local_bytes, resize, execute=execute)
@@ -387,7 +502,7 @@ class LibvirtConnection(object):
yield process.simple_execute('sudo chown root %s' %
basepath('disk'))
- def to_xml(self, instance):
+ def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
logging.debug('instance %s: starting toXML method', instance['name'])
network = db.project_get_network(context.get_admin_context(),
@@ -409,13 +524,19 @@ class LibvirtConnection(object):
'mac_address': instance['mac_address'],
'ip_address': ip_address,
'dhcp_server': dhcp_server}
- libvirt_xml = self.libvirt_xml % xml_info
+ if rescue:
+ libvirt_xml = self.rescue_xml % xml_info
+ else:
+ libvirt_xml = self.libvirt_xml % xml_info
logging.debug('instance %s: finished toXML method', instance['name'])
return libvirt_xml
def get_info(self, instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ try:
+ virt_dom = self._conn.lookupByName(instance_name)
+ except:
+ raise exception.NotFound("Instance %s not found" % instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
index a17e405ab..3169562a5 100644
--- a/nova/virt/xenapi.py
+++ b/nova/virt/xenapi.py
@@ -33,6 +33,18 @@ long-running operations.
FIXME: get_info currently doesn't conform to these rules, and will block the
reactor thread if the VM.get_by_name_label or VM.get_record calls block.
+
+**Related Flags**
+
+:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
+:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
+ Platform (default: root).
+:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
+ Platform.
+:xenapi_task_poll_interval: The interval (seconds) used for polling of
+ remote tasks (Async.VM.start, etc)
+ (default: 0.5).
+
"""
import logging
@@ -274,11 +286,21 @@ class XenAPIConnection(object):
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
defer.returnValue(None)
+ # Get the VDIs related to the VM
+ vdis = yield self._lookup_vm_vdis(vm)
try:
task = yield self._call_xenapi('Async.VM.hard_shutdown', vm)
yield self._wait_for_task(task)
except Exception, exc:
logging.warn(exc)
+ # Disk clean-up
+ if vdis:
+ for vdi in vdis:
+ try:
+ task = yield self._call_xenapi('Async.VDI.destroy', vdi)
+ yield self._wait_for_task(task)
+ except Exception, exc:
+ logging.warn(exc)
try:
task = yield self._call_xenapi('Async.VM.destroy', vm)
yield self._wait_for_task(task)
@@ -313,6 +335,30 @@ class XenAPIConnection(object):
else:
return vms[0]
+ @utils.deferredToThread
+ def _lookup_vm_vdis(self, vm):
+ return self._lookup_vm_vdis_blocking(vm)
+
+ def _lookup_vm_vdis_blocking(self, vm):
+ # Firstly we get the VBDs, then the VDIs.
+ # TODO: do we leave the read-only devices?
+ vbds = self._conn.xenapi.VM.get_VBDs(vm)
+ vdis = []
+ if vbds:
+ for vbd in vbds:
+ try:
+ vdi = self._conn.xenapi.VBD.get_VDI(vbd)
+ # Test valid VDI
+ record = self._conn.xenapi.VDI.get_record(vdi)
+ except Exception, exc:
+ logging.warn(exc)
+ else:
+ vdis.append(vdi)
+ if len(vdis) > 0:
+ return vdis
+ else:
+ return None
+
def _wait_for_task(self, task):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes."""
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 3fa29ba37..156aad2a0 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -15,18 +15,20 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
-Drivers for volumes
+Drivers for volumes.
+
"""
import logging
+import os
from twisted.internet import defer
from nova import exception
from nova import flags
from nova import process
+from nova import utils
FLAGS = flags.FLAGS
@@ -36,12 +38,29 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
flags.DEFINE_string('num_shell_tries', 3,
'number of times to attempt to run flakey shell commands')
+flags.DEFINE_integer('num_shelves',
+ 100,
+ 'Number of vblade shelves')
+flags.DEFINE_integer('blades_per_shelf',
+ 16,
+ 'Number of vblade blades per shelf')
+flags.DEFINE_integer('iscsi_num_targets',
+ 100,
+ 'Number of iscsi target ids per host')
+flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:',
+ 'prefix for iscsi volumes')
+flags.DEFINE_string('iscsi_ip_prefix', '127.0',
+ 'discover volumes on the ip that starts with this prefix')
-class AOEDriver(object):
- """Executes commands relating to AOE volumes"""
- def __init__(self, execute=process.simple_execute, *args, **kwargs):
+class VolumeDriver(object):
+ """Executes commands relating to Volumes."""
+ def __init__(self, execute=process.simple_execute,
+ sync_exec=utils.execute, *args, **kwargs):
+ # NOTE(vish): db is set by Manager
+ self.db = None
self._execute = execute
+ self._sync_exec = sync_exec
@defer.inlineCallbacks
def _try_execute(self, command):
@@ -61,55 +80,95 @@ class AOEDriver(object):
"Try number %s", tries)
yield self._execute("sleep %s" % tries ** 2)
+ def check_for_setup_error(self):
+ """Returns an error if prerequisites aren't met"""
+ if not os.path.isdir("/dev/%s" % FLAGS.volume_group):
+ raise exception.Error("volume group %s doesn't exist"
+ % FLAGS.volume_group)
+
@defer.inlineCallbacks
- def create_volume(self, volume_name, size):
- """Creates a logical volume"""
- # NOTE(vish): makes sure that the volume group exists
- yield self._execute("vgs %s" % FLAGS.volume_group)
- if int(size) == 0:
+ def create_volume(self, volume):
+ """Creates a logical volume."""
+ if int(volume['size']) == 0:
sizestr = '100M'
else:
- sizestr = '%sG' % size
+ sizestr = '%sG' % volume['size']
yield self._try_execute("sudo lvcreate -L %s -n %s %s" %
(sizestr,
- volume_name,
+ volume['name'],
FLAGS.volume_group))
@defer.inlineCallbacks
- def delete_volume(self, volume_name):
- """Deletes a logical volume"""
+ def delete_volume(self, volume):
+ """Deletes a logical volume."""
yield self._try_execute("sudo lvremove -f %s/%s" %
(FLAGS.volume_group,
- volume_name))
+ volume['name']))
@defer.inlineCallbacks
- def create_export(self, volume_name, shelf_id, blade_id):
- """Creates an export for a logical volume"""
- yield self._try_execute(
- "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (shelf_id,
- blade_id,
- FLAGS.aoe_eth_dev,
- FLAGS.volume_group,
- volume_name))
+ def local_path(self, volume):
+ yield # NOTE(vish): stops deprecation warning
+ escaped_group = FLAGS.volume_group.replace('-', '--')
+ escaped_name = volume['name'].replace('-', '--')
+ defer.returnValue("/dev/mapper/%s-%s" % (escaped_group,
+ escaped_name))
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ raise NotImplementedError()
@defer.inlineCallbacks
- def discover_volume(self, _volume_name):
- """Discover volume on a remote host"""
- yield self._execute("sudo aoe-discover")
- yield self._execute("sudo aoe-stat")
+ def create_export(self, context, volume):
+ """Exports the volume."""
+ raise NotImplementedError()
@defer.inlineCallbacks
- def remove_export(self, _volume_name, shelf_id, blade_id):
- """Removes an export for a logical volume"""
- yield self._try_execute("sudo vblade-persist stop %s %s" %
- (shelf_id, blade_id))
- yield self._try_execute("sudo vblade-persist destroy %s %s" %
- (shelf_id, blade_id))
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ raise NotImplementedError()
+
+ @defer.inlineCallbacks
+ def discover_volume(self, volume):
+ """Discover volume on a remote host."""
+ raise NotImplementedError()
+
+ @defer.inlineCallbacks
+ def undiscover_volume(self, volume):
+ """Undiscover volume on a remote host."""
+ raise NotImplementedError()
+
+
+class AOEDriver(VolumeDriver):
+ """Implements AOE specific volume commands."""
+
+ def ensure_export(self, context, volume):
+ # NOTE(vish): we depend on vblade-persist for recreating exports
+ pass
+
+ def _ensure_blades(self, context):
+ """Ensure that blades have been created in datastore."""
+ total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf
+ if self.db.export_device_count(context) >= total_blades:
+ return
+ for shelf_id in xrange(FLAGS.num_shelves):
+ for blade_id in xrange(FLAGS.blades_per_shelf):
+ dev = {'shelf_id': shelf_id, 'blade_id': blade_id}
+ self.db.export_device_create_safe(context, dev)
@defer.inlineCallbacks
- def ensure_exports(self):
- """Runs all existing exports"""
+ def create_export(self, context, volume):
+ """Creates an export for a logical volume."""
+ self._ensure_blades(context)
+ (shelf_id,
+ blade_id) = self.db.volume_allocate_shelf_and_blade(context,
+ volume['id'])
+ yield self._try_execute(
+ "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
+ (shelf_id,
+ blade_id,
+ FLAGS.aoe_eth_dev,
+ FLAGS.volume_group,
+ volume['name']))
# NOTE(vish): The standard _try_execute does not work here
# because these methods throw errors if other
# volumes on this host are in the process of
@@ -123,13 +182,155 @@ class AOEDriver(object):
yield self._execute("sudo vblade-persist start all",
check_exit_code=False)
+ @defer.inlineCallbacks
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ (shelf_id,
+ blade_id) = self.db.volume_get_shelf_and_blade(context,
+ volume['id'])
+ yield self._try_execute("sudo vblade-persist stop %s %s" %
+ (shelf_id, blade_id))
+ yield self._try_execute("sudo vblade-persist destroy %s %s" %
+ (shelf_id, blade_id))
+
+ @defer.inlineCallbacks
+ def discover_volume(self, _volume):
+ """Discover volume on a remote host."""
+ yield self._execute("sudo aoe-discover")
+ yield self._execute("sudo aoe-stat", check_exit_code=False)
+
+ @defer.inlineCallbacks
+ def undiscover_volume(self, _volume):
+ """Undiscover volume on a remote host."""
+ yield
+
class FakeAOEDriver(AOEDriver):
- """Logs calls instead of executing"""
+ """Logs calls instead of executing."""
+
def __init__(self, *args, **kwargs):
- super(FakeAOEDriver, self).__init__(self.fake_execute)
+ super(FakeAOEDriver, self).__init__(execute=self.fake_execute,
+ sync_exec=self.fake_execute,
+ *args, **kwargs)
+
+ def check_for_setup_error(self):
+ """No setup necessary in fake mode."""
+ pass
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
- """Execute that simply logs the command"""
+ """Execute that simply logs the command."""
logging.debug("FAKE AOE: %s", cmd)
+ return (None, None)
+
+
+class ISCSIDriver(VolumeDriver):
+ """Executes commands relating to ISCSI volumes."""
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+ volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
+ self._sync_exec("sudo ietadm --op new "
+ "--tid=%s --params Name=%s" %
+ (iscsi_target, iscsi_name),
+ check_exit_code=False)
+ self._sync_exec("sudo ietadm --op new --tid=%s "
+ "--lun=0 --params Path=%s,Type=fileio" %
+ (iscsi_target, volume_path),
+ check_exit_code=False)
+
+ def _ensure_iscsi_targets(self, context, host):
+ """Ensure that target ids have been created in datastore."""
+ host_iscsi_targets = self.db.iscsi_target_count_by_host(context, host)
+ if host_iscsi_targets >= FLAGS.iscsi_num_targets:
+ return
+ # NOTE(vish): Target ids start at 1, not 0.
+ for target_num in xrange(1, FLAGS.iscsi_num_targets + 1):
+ target = {'host': host, 'target_num': target_num}
+ self.db.iscsi_target_create_safe(context, target)
+
+ @defer.inlineCallbacks
+ def create_export(self, context, volume):
+ """Creates an export for a logical volume."""
+ self._ensure_iscsi_targets(context, volume['host'])
+ iscsi_target = self.db.volume_allocate_iscsi_target(context,
+ volume['id'],
+ volume['host'])
+ iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+ volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
+ yield self._execute("sudo ietadm --op new "
+ "--tid=%s --params Name=%s" %
+ (iscsi_target, iscsi_name))
+ yield self._execute("sudo ietadm --op new --tid=%s "
+ "--lun=0 --params Path=%s,Type=fileio" %
+ (iscsi_target, volume_path))
+
+ @defer.inlineCallbacks
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ yield self._execute("sudo ietadm --op delete --tid=%s "
+ "--lun=0" % iscsi_target)
+ yield self._execute("sudo ietadm --op delete --tid=%s" %
+ iscsi_target)
+
+ @defer.inlineCallbacks
+ def _get_name_and_portal(self, volume_name, host):
+ """Gets iscsi name and portal from volume name and host."""
+ (out, _err) = yield self._execute("sudo iscsiadm -m discovery -t "
+ "sendtargets -p %s" % host)
+ for target in out.splitlines():
+ if FLAGS.iscsi_ip_prefix in target and volume_name in target:
+ (location, _sep, iscsi_name) = target.partition(" ")
+ break
+ iscsi_portal = location.split(",")[0]
+ defer.returnValue((iscsi_name, iscsi_portal))
+
+ @defer.inlineCallbacks
+ def discover_volume(self, volume):
+ """Discover volume on a remote host."""
+ (iscsi_name,
+ iscsi_portal) = yield self._get_name_and_portal(volume['name'],
+ volume['host'])
+ yield self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
+ (iscsi_name, iscsi_portal))
+ yield self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
+ "-n node.startup -v automatic" %
+ (iscsi_name, iscsi_portal))
+ defer.returnValue("/dev/iscsi/%s" % volume['name'])
+
+ @defer.inlineCallbacks
+ def undiscover_volume(self, volume):
+ """Undiscover volume on a remote host."""
+ (iscsi_name,
+ iscsi_portal) = yield self._get_name_and_portal(volume['name'],
+ volume['host'])
+ yield self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
+ "-n node.startup -v manual" %
+ (iscsi_name, iscsi_portal))
+ yield self._execute("sudo iscsiadm -m node -T %s -p %s --logout " %
+ (iscsi_name, iscsi_portal))
+ yield self._execute("sudo iscsiadm -m node --op delete "
+ "--targetname %s" % iscsi_name)
+
+
+class FakeISCSIDriver(ISCSIDriver):
+ """Logs calls instead of executing."""
+ def __init__(self, *args, **kwargs):
+ super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
+ sync_exec=self.fake_execute,
+ *args, **kwargs)
+
+ def check_for_setup_error(self):
+ """No setup necessary in fake mode."""
+ pass
+
+ @staticmethod
+ def fake_execute(cmd, *_args, **_kwargs):
+ """Execute that simply logs the command."""
+ logging.debug("FAKE ISCSI: %s", cmd)
+ return (None, None)
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 2874459f9..589e7d7d9 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -15,10 +15,31 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
-Volume manager manages creating, attaching, detaching, and
-destroying persistent storage volumes, ala EBS.
+Volume manager manages creating, attaching, detaching, and persistent storage.
+
+Persistent storage volumes keep their state independent of instances. You can
+attach to an instance, terminate the instance, spawn a new instance (even
+one from a different image) and re-attach the volume with the same data
+intact.
+
+**Related Flags**
+
+:volume_topic: What :mod:`rpc` topic to listen to (default: `volume`).
+:volume_manager: The module name of a class derived from
+ :class:`manager.Manager` (default:
+ :class:`nova.volume.manager.AOEManager`).
+:storage_availability_zone: Defaults to `nova`.
+:volume_driver: Used by :class:`AOEManager`. Defaults to
+ :class:`nova.volume.driver.AOEDriver`.
+:num_shelves: Number of shelves for AoE (default: 100).
+:num_blades: Number of vblades per shelf to allocate AoE storage from
+ (default: 16).
+:volume_group: Name of the group that will contain exported volumes (default:
+ `nova-volumes`)
+:aoe_eth_dev: Device name the volumes will be exported on (default: `eth0`).
+:num_shell_tries: Number of times to attempt to run AoE commands (default: 3)
+
"""
import logging
@@ -26,6 +47,7 @@ import datetime
from twisted.internet import defer
+from nova import context
from nova import exception
from nova import flags
from nova import manager
@@ -36,100 +58,98 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this service')
-flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver',
+flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver',
'Driver to use for volume creation')
-flags.DEFINE_integer('num_shelves',
- 100,
- 'Number of vblade shelves')
-flags.DEFINE_integer('blades_per_shelf',
- 16,
- 'Number of vblade blades per shelf')
+flags.DEFINE_boolean('use_local_volumes', True,
+ 'if True, will not discover local volumes')
-class AOEManager(manager.Manager):
- """Manages Ata-Over_Ethernet volumes"""
+class VolumeManager(manager.Manager):
+ """Manages attachable block storage devices."""
def __init__(self, volume_driver=None, *args, **kwargs):
+ """Load the driver from the one specified in args, or from flags."""
if not volume_driver:
volume_driver = FLAGS.volume_driver
self.driver = utils.import_object(volume_driver)
- super(AOEManager, self).__init__(*args, **kwargs)
-
- def _ensure_blades(self, context):
- """Ensure that blades have been created in datastore"""
- total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf
- if self.db.export_device_count(context) >= total_blades:
- return
- for shelf_id in xrange(FLAGS.num_shelves):
- for blade_id in xrange(FLAGS.blades_per_shelf):
- dev = {'shelf_id': shelf_id, 'blade_id': blade_id}
- self.db.export_device_create_safe(context, dev)
+ super(VolumeManager, self).__init__(*args, **kwargs)
+ # NOTE(vish): Implementation specific db handling is done
+ # by the driver.
+ self.driver.db = self.db
+
+ def init_host(self):
+ """Do any initialization that needs to be run if this is a
+ standalone service."""
+ self.driver.check_for_setup_error()
+ ctxt = context.get_admin_context()
+ volumes = self.db.volume_get_all_by_host(ctxt, self.host)
+ logging.debug("Re-exporting %s volumes", len(volumes))
+ for volume in volumes:
+ self.driver.ensure_export(ctxt, volume)
@defer.inlineCallbacks
def create_volume(self, context, volume_id):
- """Creates and exports the volume"""
+ """Creates and exports the volume."""
context = context.elevated()
- logging.info("volume %s: creating", volume_id)
-
volume_ref = self.db.volume_get(context, volume_id)
+ logging.info("volume %s: creating", volume_ref['name'])
self.db.volume_update(context,
volume_id,
{'host': self.host})
+ # NOTE(vish): so we don't have to get volume from db again
+ # before passing it to the driver.
+ volume_ref['host'] = self.host
- size = volume_ref['size']
- logging.debug("volume %s: creating lv of size %sG", volume_id, size)
- yield self.driver.create_volume(volume_ref['ec2_id'], size)
-
- logging.debug("volume %s: allocating shelf & blade", volume_id)
- self._ensure_blades(context)
- rval = self.db.volume_allocate_shelf_and_blade(context, volume_id)
- (shelf_id, blade_id) = rval
-
- logging.debug("volume %s: exporting shelf %s & blade %s", volume_id,
- shelf_id, blade_id)
+ logging.debug("volume %s: creating lv of size %sG",
+ volume_ref['name'], volume_ref['size'])
+ yield self.driver.create_volume(volume_ref)
- yield self.driver.create_export(volume_ref['ec2_id'],
- shelf_id,
- blade_id)
-
- logging.debug("volume %s: re-exporting all values", volume_id)
- yield self.driver.ensure_exports()
+ logging.debug("volume %s: creating export", volume_ref['name'])
+ yield self.driver.create_export(context, volume_ref)
now = datetime.datetime.utcnow()
self.db.volume_update(context,
volume_ref['id'], {'status': 'available',
'launched_at': now})
- logging.debug("volume %s: created successfully", volume_id)
+ logging.debug("volume %s: created successfully", volume_ref['name'])
defer.returnValue(volume_id)
@defer.inlineCallbacks
def delete_volume(self, context, volume_id):
- """Deletes and unexports volume"""
+ """Deletes and unexports volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['attach_status'] == "attached":
raise exception.Error("Volume is still attached")
if volume_ref['host'] != self.host:
raise exception.Error("Volume is not local to this node")
- logging.debug("Deleting volume with id of: %s", volume_id)
- shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context,
- volume_id)
- yield self.driver.remove_export(volume_ref['ec2_id'],
- shelf_id,
- blade_id)
- yield self.driver.delete_volume(volume_ref['ec2_id'])
+ logging.debug("volume %s: removing export", volume_ref['name'])
+ yield self.driver.remove_export(context, volume_ref)
+ logging.debug("volume %s: deleting", volume_ref['name'])
+ yield self.driver.delete_volume(volume_ref)
self.db.volume_destroy(context, volume_id)
+ logging.debug("volume %s: deleted successfully", volume_ref['name'])
defer.returnValue(True)
@defer.inlineCallbacks
def setup_compute_volume(self, context, volume_id):
- """Setup remote volume on compute host
+        """Set up remote volume on compute host.
- Returns path to device.
- """
+ Returns path to device."""
+ context = context.elevated()
+ volume_ref = self.db.volume_get(context, volume_id)
+ if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
+ path = yield self.driver.local_path(volume_ref)
+ else:
+ path = yield self.driver.discover_volume(volume_ref)
+ defer.returnValue(path)
+
+ @defer.inlineCallbacks
+ def remove_compute_volume(self, context, volume_id):
+ """Remove remote volume on compute host."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
- yield self.driver.discover_volume(volume_ref['ec2_id'])
- shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context,
- volume_id)
- defer.returnValue("/dev/etherd/e%s.%s" % (shelf_id, blade_id))
+ if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
+ defer.returnValue(True)
+ else:
+ yield self.driver.undiscover_volume(volume_ref)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index eb305a3d3..c7ee9ed14 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -28,7 +28,7 @@ from xml.dom import minidom
import eventlet
import eventlet.wsgi
-eventlet.patcher.monkey_patch(all=False, socket=True)
+eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
import routes
import routes.middleware
import webob
@@ -39,10 +39,27 @@ import webob.exc
logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
-def run_server(application, port):
- """Run a WSGI server with the given application."""
- sock = eventlet.listen(('0.0.0.0', port))
- eventlet.wsgi.server(sock, application)
+class Server(object):
+ """Server class to manage multiple WSGI sockets and applications."""
+
+ def __init__(self, threads=1000):
+ self.pool = eventlet.GreenPool(threads)
+
+ def start(self, application, port, host='0.0.0.0', backlog=128):
+ """Run a WSGI server with the given application."""
+ socket = eventlet.listen((host, port), backlog=backlog)
+ self.pool.spawn_n(self._run, application, socket)
+
+ def wait(self):
+ """Wait until all servers have completed running."""
+ try:
+ self.pool.waitall()
+ except KeyboardInterrupt:
+ pass
+
+ def _run(self, application, socket):
+ """Start a WSGI server in a new green thread."""
+ eventlet.wsgi.server(socket, application, custom_pool=self.pool)
class Application(object):