author     Soren Hansen <soren.hansen@rackspace.com>  2010-11-29 13:14:26 +0100
committer  Soren Hansen <soren.hansen@rackspace.com>  2010-11-29 13:14:26 +0100
commit     c5d3e310376b3fb5c548e1d2e70c5ce7a489bb9f (patch)
tree       15a73446e1daa6829a68e05a3cef862924428473 /nova
parent     b65b41e5957d5ded516343b3611292c9744d169f (diff)
parent     4f92d1d39fcfda4dad73e6e0339351f0d7d00d61 (diff)
Merge trunk.
Diffstat (limited to 'nova')
-rw-r--r--  nova/adminclient.py | 92
-rw-r--r--  nova/api/__init__.py | 8
-rw-r--r--  nova/api/ec2/__init__.py | 8
-rw-r--r--  nova/api/ec2/cloud.py | 30
-rw-r--r--  nova/api/ec2/images.py | 123
-rw-r--r--  nova/api/openstack/images.py | 13
-rw-r--r--  nova/auth/fakeldap.py | 11
-rw-r--r--  nova/auth/manager.py | 3
-rw-r--r--  nova/compute/disk.py | 38
-rw-r--r--  nova/compute/manager.py | 39
-rw-r--r--  nova/compute/monitor.py | 4
-rw-r--r--  nova/crypto.py | 15
-rw-r--r--  nova/db/api.py | 138
-rw-r--r--  nova/db/sqlalchemy/api.py | 13
-rw-r--r--  nova/db/sqlalchemy/models.py | 53
-rw-r--r--  nova/flags.py | 6
-rw-r--r--  nova/image/glance.py (renamed from nova/image/services/glance/__init__.py) | 24
-rw-r--r--  nova/image/local.py | 88
-rw-r--r--  nova/image/s3.py | 109
-rw-r--r--  nova/image/service.py | 131
-rw-r--r--  nova/image/services/__init__.py | 0
-rw-r--r--  nova/manager.py | 34
-rw-r--r--  nova/network/linux_net.py | 22
-rw-r--r--  nova/network/manager.py | 201
-rw-r--r--  nova/objectstore/bucket.py | 4
-rw-r--r--  nova/objectstore/handler.py | 4
-rw-r--r--  nova/objectstore/image.py | 4
-rw-r--r--  nova/rpc.py | 30
-rw-r--r--  nova/server.py | 4
-rw-r--r--  nova/service.py | 7
-rw-r--r--  nova/tests/api/openstack/fakes.py | 20
-rw-r--r--  nova/tests/api/openstack/test_images.py | 41
-rw-r--r--  nova/tests/cloud_unittest.py | 35
-rw-r--r--  nova/tests/fake_flags.py | 1
-rw-r--r--  nova/tests/misc_unittest.py | 48
-rw-r--r--  nova/tests/network_unittest.py | 2
-rw-r--r--  nova/tests/quota_unittest.py | 8
-rw-r--r--  nova/tests/volume_unittest.py | 20
-rw-r--r--  nova/twistd.py | 4
-rw-r--r--  nova/utils.py | 19
-rw-r--r--  nova/virt/connection.py | 25
-rw-r--r--  nova/virt/fake.py | 7
-rw-r--r--  nova/virt/libvirt_conn.py | 26
-rw-r--r--  nova/virt/xenapi.py | 46
-rw-r--r--  nova/volume/driver.py | 60
-rw-r--r--  nova/volume/manager.py | 44
-rw-r--r--  nova/wsgi.py | 2
47 files changed, 1059 insertions, 605 deletions
diff --git a/nova/adminclient.py b/nova/adminclient.py
index b7a3d2c32..5a62cce7d 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -22,25 +22,28 @@ Nova User API client library.
import base64
import boto
import httplib
+
+from nova import flags
from boto.ec2.regioninfo import RegionInfo
+FLAGS = flags.FLAGS
+
DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
DEFAULT_REGION = 'nova'
-DEFAULT_ACCESS_KEY = 'admin'
-DEFAULT_SECRET_KEY = 'admin'
class UserInfo(object):
"""
- Information about a Nova user, as parsed through SAX
- fields include:
- username
- accesskey
- secretkey
-
- and an optional field containing a zip with X509 cert & rc
- file
+ Information about a Nova user, as parsed through SAX.
+
+ **Fields Include**
+
+ * username
+ * accesskey
+ * secretkey
+ * file (optional) containing zip of X509 cert & rc file
+
"""
def __init__(self, connection=None, username=None, endpoint=None):
@@ -68,9 +71,13 @@ class UserInfo(object):
class UserRole(object):
"""
Information about a Nova user's role, as parsed through SAX.
- Fields include:
- role
+
+ **Fields include**
+
+ * role
+
"""
+
def __init__(self, connection=None):
self.connection = connection
self.role = None
@@ -90,12 +97,15 @@ class UserRole(object):
class ProjectInfo(object):
"""
- Information about a Nova project, as parsed through SAX
- Fields include:
- projectname
- description
- projectManagerId
- memberIds
+ Information about a Nova project, as parsed through SAX.
+
+ **Fields include**
+
+ * projectname
+ * description
+ * projectManagerId
+ * memberIds
+
"""
def __init__(self, connection=None):
@@ -127,8 +137,11 @@ class ProjectInfo(object):
class ProjectMember(object):
"""
Information about a Nova project member, as parsed through SAX.
- Fields include:
- memberId
+
+ **Fields include**
+
+ * memberId
+
"""
def __init__(self, connection=None):
@@ -150,14 +163,18 @@ class ProjectMember(object):
class HostInfo(object):
"""
- Information about a Nova Host, as parsed through SAX:
- Disk stats
- Running Instances
- Memory stats
- CPU stats
- Network address info
- Firewall info
- Bridge and devices
+ Information about a Nova Host, as parsed through SAX.
+
+ **Fields Include**
+
+ * Disk stats
+ * Running Instances
+ * Memory stats
+ * CPU stats
+ * Network address info
+ * Firewall info
+ * Bridge and devices
+
"""
def __init__(self, connection=None):
@@ -177,9 +194,13 @@ class HostInfo(object):
class NovaAdminClient(object):
- def __init__(self, clc_url=DEFAULT_CLC_URL, region=DEFAULT_REGION,
- access_key=DEFAULT_ACCESS_KEY, secret_key=DEFAULT_SECRET_KEY,
- **kwargs):
+ def __init__(
+ self,
+ clc_url=DEFAULT_CLC_URL,
+ region=DEFAULT_REGION,
+ access_key=FLAGS.aws_access_key_id,
+ secret_key=FLAGS.aws_secret_access_key,
+ **kwargs):
parts = self.split_clc_url(clc_url)
self.clc_url = clc_url
@@ -257,9 +278,12 @@ class NovaAdminClient(object):
[('item', UserRole)])
def get_user_roles(self, user, project=None):
- """Returns a list of roles for the given user. Omitting project will
- return any global roles that the user has. Specifying project will
- return only project specific roles."""
+ """Returns a list of roles for the given user.
+
+ Omitting project will return any global roles that the user has.
+ Specifying project will return only project specific roles.
+
+ """
params = {'User': user}
if project:
params['Project'] = project
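
Usage sketch (illustrative, not part of the patch): with DEFAULT_ACCESS_KEY and DEFAULT_SECRET_KEY removed, a NovaAdminClient built without explicit credentials now falls back to the aws_access_key_id / aws_secret_access_key flags; the defaults are bound when nova.adminclient is imported, so flags should be parsed first. The user name below is made up.

    # Illustrative sketch -- not part of this commit.
    from nova import flags
    from nova import adminclient

    FLAGS = flags.FLAGS

    # No access_key/secret_key arguments: the client uses
    # FLAGS.aws_access_key_id / FLAGS.aws_secret_access_key
    # ('admin'/'admin' unless overridden on the command line).
    admin = adminclient.NovaAdminClient()

    # Omitting project returns only the user's global roles.
    roles = admin.get_user_roles('someuser')
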
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
index 707c1623e..80f9f2109 100644
--- a/nova/api/__init__.py
+++ b/nova/api/__init__.py
@@ -15,15 +15,21 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
Root WSGI middleware for all API controllers.
+
+**Related Flags**
+
+:osapi_subdomain: subdomain running the OpenStack API (default: api)
+:ec2api_subdomain: subdomain running the EC2 API (default: ec2)
+
"""
import routes
import webob.dec
from nova import flags
+from nova import utils
from nova import wsgi
from nova.api import cloudpipe
from nova.api import ec2
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index c53ce6f5e..a6ee16c33 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -15,8 +15,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+"""
+Starting point for routing EC2 requests.
-"""Starting point for routing EC2 requests"""
+"""
import logging
import routes
@@ -242,8 +244,8 @@ class Executor(wsgi.Application):
resp = webob.Response()
resp.status = 400
resp.headers['Content-Type'] = 'text/xml'
- resp.body = ('<?xml version="1.0"?>\n'
+ resp.body = str('<?xml version="1.0"?>\n'
'<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
- '<RequestID>?</RequestID></Response>') % (code, message)
+ '<RequestID>?</RequestID></Response>' % (code, message))
return resp
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index fbe4caa48..9327bf0d4 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -41,7 +41,7 @@ from nova import rpc
from nova import utils
from nova.compute.instance_types import INSTANCE_TYPES
from nova.api import cloud
-from nova.api.ec2 import images
+from nova.image.s3 import S3ImageService
FLAGS = flags.FLAGS
@@ -100,6 +100,7 @@ class CloudController(object):
def __init__(self):
self.network_manager = utils.import_object(FLAGS.network_manager)
self.compute_manager = utils.import_object(FLAGS.compute_manager)
+ self.image_service = S3ImageService()
self.setup()
def __str__(self):
@@ -679,7 +680,7 @@ class CloudController(object):
context.project_id)
for floating_ip_ref in iterator:
address = floating_ip_ref['address']
- instance_id = None
+ ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
@@ -717,8 +718,8 @@ class CloudController(object):
"args": {"floating_address": floating_ip_ref['address']}})
return {'releaseResponse': ["Address released."]}
- def associate_address(self, context, ec2_id, public_ip, **kwargs):
- internal_id = ec2_id_to_internal_id(ec2_id)
+ def associate_address(self, context, instance_id, public_ip, **kwargs):
+ internal_id = ec2_id_to_internal_id(instance_id)
instance_ref = db.instance_get_by_internal_id(context, internal_id)
fixed_address = db.instance_get_fixed_address(context,
instance_ref['id'])
@@ -785,7 +786,7 @@ class CloudController(object):
vpn = kwargs['image_id'] == FLAGS.vpn_image_id
if not vpn:
- image = images.get(context, kwargs['image_id'])
+ image = self.image_service.show(context, kwargs['image_id'])
# FIXME(ja): if image is vpn, this breaks
# get defaults from imagestore
@@ -798,8 +799,8 @@ class CloudController(object):
ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
# make sure we have access to kernel and ramdisk
- images.get(context, kernel_id)
- images.get(context, ramdisk_id)
+ self.image_service.show(context, kernel_id)
+ self.image_service.show(context, ramdisk_id)
logging.debug("Going to run %s instances...", num_instances)
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
@@ -993,20 +994,17 @@ class CloudController(object):
return True
def describe_images(self, context, image_id=None, **kwargs):
- # The objectstore does its own authorization for describe
- imageSet = images.list(context, image_id)
+ imageSet = self.image_service.index(context, image_id)
return {'imagesSet': imageSet}
def deregister_image(self, context, image_id, **kwargs):
- # FIXME: should the objectstore be doing these authorization checks?
- images.deregister(context, image_id)
+ self.image_service.deregister(context, image_id)
return {'imageId': image_id}
def register_image(self, context, image_location=None, **kwargs):
- # FIXME: should the objectstore be doing these authorization checks?
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
- image_id = images.register(context, image_location)
+ image_id = self.image_service.register(context, image_location)
logging.debug("Registered %s as %s" % (image_location, image_id))
return {'imageId': image_id}
@@ -1014,7 +1012,7 @@ class CloudController(object):
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
try:
- image = images.list(context, image_id)[0]
+ image = self.image_service.show(context, image_id)
except IndexError:
raise exception.ApiError('invalid id: %s' % image_id)
result = {'image_id': image_id, 'launchPermission': []}
@@ -1033,8 +1031,8 @@ class CloudController(object):
raise exception.ApiError('only group "all" is supported')
if not operation_type in ['add', 'remove']:
raise exception.ApiError('operation_type must be add or remove')
- return images.modify(context, image_id, operation_type)
+ return self.image_service.modify(context, image_id, operation_type)
def update_image(self, context, image_id, **kwargs):
- result = images.update(context, image_id, dict(kwargs))
+ result = self.image_service.update(context, image_id, dict(kwargs))
return result
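
Sketch of the new call path (illustrative, not part of the patch): the cloud controller no longer uses the module-level helpers from nova.api.ec2.images; every image operation is a method on the S3ImageService created in __init__, and each call carries the request context. The wrapper function and its arguments below are made up.

    # Illustrative sketch -- mirrors the calls CloudController now makes.
    from nova.image.s3 import S3ImageService

    def check_and_register(context, image_id, image_location):
        service = S3ImageService()
        # run_instances path: was images.get(context, image_id)
        service.show(context, image_id)
        # register_image path: was images.register(context, image_location)
        return service.register(context, image_location)
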
diff --git a/nova/api/ec2/images.py b/nova/api/ec2/images.py
deleted file mode 100644
index 60f9008e9..000000000
--- a/nova/api/ec2/images.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Proxy AMI-related calls from the cloud controller, to the running
-objectstore service.
-"""
-
-import json
-import urllib
-
-import boto.s3.connection
-
-from nova import exception
-from nova import flags
-from nova import utils
-from nova.auth import manager
-
-
-FLAGS = flags.FLAGS
-
-
-def modify(context, image_id, operation):
- conn(context).make_request(
- method='POST',
- bucket='_images',
- query_args=qs({'image_id': image_id, 'operation': operation}))
-
- return True
-
-
-def update(context, image_id, attributes):
- """update an image's attributes / info.json"""
- attributes.update({"image_id": image_id})
- conn(context).make_request(
- method='POST',
- bucket='_images',
- query_args=qs(attributes))
- return True
-
-
-def register(context, image_location):
- """ rpc call to register a new image based from a manifest """
-
- image_id = utils.generate_uid('ami')
- conn(context).make_request(
- method='PUT',
- bucket='_images',
- query_args=qs({'image_location': image_location,
- 'image_id': image_id}))
-
- return image_id
-
-
-def list(context, filter_list=[]):
- """ return a list of all images that a user can see
-
- optionally filtered by a list of image_id """
-
- if FLAGS.connection_type == 'fake':
- return [{'imageId': 'bar'}]
-
- # FIXME: send along the list of only_images to check for
- response = conn(context).make_request(
- method='GET',
- bucket='_images')
-
- result = json.loads(response.read())
- if not filter_list is None:
- return [i for i in result if i['imageId'] in filter_list]
- return result
-
-
-def get(context, image_id):
- """return a image object if the context has permissions"""
- result = list(context, [image_id])
- if not result:
- raise exception.NotFound('Image %s could not be found' % image_id)
- image = result[0]
- return image
-
-
-def deregister(context, image_id):
- """ unregister an image """
- conn(context).make_request(
- method='DELETE',
- bucket='_images',
- query_args=qs({'image_id': image_id}))
-
-
-def conn(context):
- access = manager.AuthManager().get_access_key(context.user,
- context.project)
- secret = str(context.user.secret)
- calling = boto.s3.connection.OrdinaryCallingFormat()
- return boto.s3.connection.S3Connection(aws_access_key_id=access,
- aws_secret_access_key=secret,
- is_secure=False,
- calling_format=calling,
- port=FLAGS.s3_port,
- host=FLAGS.s3_host)
-
-
-def qs(params):
- pairs = []
- for key in params.keys():
- pairs.append(key + '=' + urllib.quote(params[key]))
- return '&'.join(pairs)
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 5bc915e63..cdbdc9bdd 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -17,6 +17,7 @@
from webob import exc
+from nova import context
from nova import flags
from nova import utils
from nova import wsgi
@@ -46,19 +47,23 @@ class Controller(wsgi.Controller):
def detail(self, req):
"""Return all public images in detail."""
+ user_id = req.environ['nova.context']['user']['id']
+ ctxt = context.RequestContext(user_id, user_id)
try:
- images = self._service.detail()
+ images = self._service.detail(ctxt)
images = nova.api.openstack.limited(images, req)
except NotImplementedError:
# Emulate detail() using repeated calls to show()
- images = self._service.index()
+ images = self._service.index(ctxt)
images = nova.api.openstack.limited(images, req)
- images = [self._service.show(i['id']) for i in images]
+ images = [self._service.show(ctxt, i['id']) for i in images]
return dict(images=images)
def show(self, req, id):
"""Return data about the given image id."""
- return dict(image=self._service.show(id))
+ user_id = req.environ['nova.context']['user']['id']
+ ctxt = context.RequestContext(user_id, user_id)
+ return dict(image=self._service.show(ctxt, id))
def delete(self, req, id):
# Only public images are supported for now.
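
Sketch (illustrative, not part of the patch): the OpenStack images controller now builds a nova RequestContext from the WSGI environ and threads it through every image-service call. The helper name below is made up; its body mirrors the inline code in detail() and show().

    # Illustrative sketch of the per-request context construction.
    from nova import context

    def _context_from_request(req):
        user_id = req.environ['nova.context']['user']['id']
        # The project currently defaults to the user id.
        return context.RequestContext(user_id, user_id)
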
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index cf3a84a5d..46e0135b4 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -15,12 +15,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Fake LDAP server for test harnesses.
+"""Fake LDAP server for test harness, backs to ReDIS.
This class does very little error checking, and knows nothing about ldap
-class definitions. It implements the minimum emulation of the python ldap
+class definitions. It implements the minimum emulation of the python ldap
library to work with nova.
+
"""
import json
@@ -77,9 +77,8 @@ def initialize(_uri):
def _match_query(query, attrs):
"""Match an ldap query to an attribute dictionary.
- &, |, and ! are supported in the query. No syntax checking is performed,
- so malformed querys will not work correctly.
-
+ The characters &, |, and ! are supported in the query. No syntax checking
+    is performed, so malformed queries will not work correctly.
"""
# cut off the parentheses
inner = query[1:-1]
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 001a96875..7b2b68161 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -84,12 +84,11 @@ class AuthBase(object):
@classmethod
def safe_id(cls, obj):
- """Safe get object id
+ """Safely get object id.
This method will return the id of the object if the object
is of this class, otherwise it will return the original object.
    This allows methods to accept objects or ids as parameters.
-
"""
if isinstance(obj, cls):
return obj.id
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index e362b4507..4338d39f0 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -15,10 +15,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
Utility methods to resize, repartition, and modify disk images.
+
Includes injection of SSH PGP keys into authorized_keys file.
+
"""
import logging
@@ -41,20 +42,23 @@ flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
@defer.inlineCallbacks
def partition(infile, outfile, local_bytes=0, resize=True,
local_type='ext2', execute=None):
- """Takes a single partition represented by infile and writes a bootable
- drive image into outfile.
+ """
+ Turns a partition (infile) into a bootable drive image (outfile).
The first 63 sectors (0-62) of the resulting image is a master boot record.
Infile becomes the first primary partition.
If local bytes is specified, a second primary partition is created and
formatted as ext2.
- In the diagram below, dashes represent drive sectors.
- +-----+------. . .-------+------. . .------+
- | 0 a| b c|d e|
- +-----+------. . .-------+------. . .------+
- | mbr | primary partiton | local partition |
- +-----+------. . .-------+------. . .------+
+ ::
+
+ In the diagram below, dashes represent drive sectors.
+ +-----+------. . .-------+------. . .------+
+ | 0 a| b c|d e|
+ +-----+------. . .-------+------. . .------+
+ | mbr | primary partiton | local partition |
+ +-----+------. . .-------+------. . .------+
+
"""
sector_size = 512
file_size = os.path.getsize(infile)
@@ -161,6 +165,11 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
+ """Add the given public ssh key to root's authorized_keys.
+
+ key is an ssh key string.
+ fs is the path to the base of the filesystem into which to inject the key.
+ """
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
yield execute('sudo chown root %s' % sshdir)
@@ -171,6 +180,13 @@ def _inject_key_into_fs(key, fs, execute=None):
@defer.inlineCallbacks
def _inject_net_into_fs(net, fs, execute=None):
- netfile = os.path.join(os.path.join(os.path.join(
- fs, 'etc'), 'network'), 'interfaces')
+ """Inject /etc/network/interfaces into the filesystem rooted at fs.
+
+ net is the contents of /etc/network/interfaces.
+ """
+ netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
+ yield execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
+ yield execute('sudo chown root:root %s' % netdir)
+ yield execute('sudo chmod 755 %s' % netdir)
+ netfile = os.path.join(netdir, 'interfaces')
yield execute('sudo tee %s' % netfile, net)
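
Sketch (illustrative, not part of the patch): the newly documented helpers are normally driven through inject_data(), whose signature is visible in the hunk above. The wrapper below is made up; execute stands for whatever deferred-returning command runner the virt driver already passes in, and the key/network contents are placeholders.

    # Illustrative sketch -- prepare_image and its arguments are made up.
    from twisted.internet import defer
    from nova.compute import disk

    @defer.inlineCallbacks
    def prepare_image(image_path, execute):
        # Injects the key into root/.ssh/authorized_keys and, with this
        # change, creates etc/network before writing etc/network/interfaces.
        yield disk.inject_data(image_path,
                               key='ssh-rsa AAAA... nova@example',
                               net='auto eth0\niface eth0 inet dhcp\n',
                               execute=execute)
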
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 65fa50431..50a9d316b 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -17,7 +17,21 @@
# under the License.
"""
-Handles all code relating to instances (guest vms)
+Handles all processes relating to instances (guest vms).
+
+The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
+handles RPC calls relating to creating instances. It is responsible for
+building a disk image, launching it via the underlying virtualization driver,
+responding to calls to check its state, attaching persistent storage, and
+handling termination.
+
+**Related Flags**
+
+:instances_path: Where instances are kept on disk
+:compute_driver: Name of class that is used to handle virtualization, loaded
+ by :func:`nova.utils.import_object`
+:volume_manager: Name of class that handles persistent storage, loaded by
+ :func:`nova.utils.import_object`
"""
import datetime
@@ -40,12 +54,12 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
class ComputeManager(manager.Manager):
- """
- Manages the running instances.
- """
+ """Manages the running instances from creation to destruction."""
+
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# TODO(vish): sync driver creation logic with the rest of the system
+ # and redocument the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
self.driver = utils.import_object(compute_driver)
@@ -54,7 +68,7 @@ class ComputeManager(manager.Manager):
super(ComputeManager, self).__init__(*args, **kwargs)
def _update_state(self, context, instance_id):
- """Update the state of an instance from the driver info"""
+ """Update the state of an instance from the driver info."""
# FIXME(ja): include other fields from state?
instance_ref = self.db.instance_get(context, instance_id)
try:
@@ -67,6 +81,7 @@ class ComputeManager(manager.Manager):
@defer.inlineCallbacks
@exception.wrap_exception
def refresh_security_group(self, context, security_group_id, **_kwargs):
+ """This call passes stright through to the virtualization driver."""
yield self.driver.refresh_security_group(security_group_id)
def create_instance(self, context, security_groups=None, **kwargs):
@@ -76,9 +91,9 @@ class ComputeManager(manager.Manager):
:param context: The security context
:param security_groups: list of security group ids to
attach to the instance
- :param **kwargs: All additional keyword args are treated
- as data fields of the instance to be
- created
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ created
:retval Returns a mapping of the instance information
that has just been created
@@ -97,13 +112,13 @@ class ComputeManager(manager.Manager):
return instance_ref
def update_instance(self, context, instance_id, **kwargs):
- """Updates the instance in the datastore
+ """Updates the instance in the datastore.
:param context: The security context
:param instance_id: ID of the instance to update
- :param **kwargs: All additional keyword args are treated
- as data fields of the instance to be
- updated
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
:retval None
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 024f3ed3c..22653113a 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -211,8 +211,8 @@ def store_graph(instance_id, filename):
# the response we can make our own client that does the actual
# request and hands it off to the response parser.
s3 = boto.s3.connection.S3Connection(
- aws_access_key_id='admin',
- aws_secret_access_key='admin',
+ aws_access_key_id=FLAGS.aws_access_key_id,
+ aws_secret_access_key=FLAGS.aws_secret_access_key,
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
port=FLAGS.s3_port,
diff --git a/nova/crypto.py b/nova/crypto.py
index 045f7f53f..aacc50b17 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -15,10 +15,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
-Wrappers around standard crypto, including root and intermediate CAs,
-SSH key_pairs and x509 certificates.
+Wrappers around standard crypto data elements.
+
+Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
+
"""
import base64
@@ -227,12 +228,12 @@ def mkcacert(subject='nova', years=1):
def compute_md5(fp):
"""
- @type fp: file
- @param fp: File pointer to the file to MD5 hash. The file pointer will be
+ :type fp: file
+ :param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
- @rtype: tuple
- @return: the hex digest version of the MD5 hash
+ :rtype: tuple
+ :return: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
diff --git a/nova/db/api.py b/nova/db/api.py
index 80563c452..8f9dc2443 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -16,7 +16,17 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Defines interface for DB access
+Defines interface for DB access.
+
+The underlying driver is loaded as a :class:`LazyPluggable`.
+
+**Related Flags**
+
+:db_backend: string to lookup in the list of LazyPluggable backends.
+ `sqlalchemy` is the only supported backend right now.
+
+:sql_connection: string specifying the sqlalchemy connection to use, like:
+ `sqlite:///var/lib/nova/nova.sqlite`.
"""
from nova import exception
@@ -34,17 +44,17 @@ IMPL = utils.LazyPluggable(FLAGS['db_backend'],
class NoMoreAddresses(exception.Error):
- """No more available addresses"""
+ """No more available addresses."""
pass
class NoMoreBlades(exception.Error):
- """No more available blades"""
+ """No more available blades."""
pass
class NoMoreNetworks(exception.Error):
- """No more available networks"""
+ """No more available networks."""
pass
@@ -67,30 +77,33 @@ def service_get(context, service_id):
def service_get_all_by_topic(context, topic):
- """Get all compute services for a given topic """
+ """Get all compute services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_compute_sorted(context):
- """Get all compute services sorted by instance count
+ """Get all compute services sorted by instance count.
+
+ Returns a list of (Service, instance_count) tuples.
- Returns a list of (Service, instance_count) tuples
"""
return IMPL.service_get_all_compute_sorted(context)
def service_get_all_network_sorted(context):
- """Get all network services sorted by network count
+ """Get all network services sorted by network count.
+
+ Returns a list of (Service, network_count) tuples.
- Returns a list of (Service, network_count) tuples
"""
return IMPL.service_get_all_network_sorted(context)
def service_get_all_volume_sorted(context):
- """Get all volume services sorted by volume count
+ """Get all volume services sorted by volume count.
+
+ Returns a list of (Service, volume_count) tuples.
- Returns a list of (Service, volume_count) tuples
"""
return IMPL.service_get_all_volume_sorted(context)
@@ -121,6 +134,7 @@ def floating_ip_allocate_address(context, host, project_id):
"""Allocate free floating ip and return the address.
Raises if one is not available.
+
"""
return IMPL.floating_ip_allocate_address(context, host, project_id)
@@ -149,6 +163,7 @@ def floating_ip_disassociate(context, address):
"""Disassociate an floating ip from a fixed ip by address.
Returns the address of the existing fixed ip.
+
"""
return IMPL.floating_ip_disassociate(context, address)
@@ -187,6 +202,7 @@ def fixed_ip_associate(context, address, instance_id):
"""Associate fixed ip to instance.
Raises if fixed ip is not available.
+
"""
return IMPL.fixed_ip_associate(context, address, instance_id)
@@ -195,6 +211,7 @@ def fixed_ip_associate_pool(context, network_id, instance_id):
"""Find free ip in network and associate it to instance.
Raises if one is not available.
+
"""
return IMPL.fixed_ip_associate_pool(context, network_id, instance_id)
@@ -210,7 +227,7 @@ def fixed_ip_disassociate(context, address):
def fixed_ip_disassociate_all_by_timeout(context, host, time):
- """Disassociate old fixed ips from host"""
+ """Disassociate old fixed ips from host."""
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
@@ -288,7 +305,7 @@ def instance_get_floating_address(context, instance_id):
def instance_get_by_internal_id(context, internal_id):
- """Get an instance by ec2 id."""
+ """Get an instance by internal id."""
return IMPL.instance_get_by_internal_id(context, internal_id)
@@ -312,7 +329,7 @@ def instance_update(context, instance_id, values):
def instance_add_security_group(context, instance_id, security_group_id):
- """Associate the given security group with the given instance"""
+ """Associate the given security group with the given instance."""
return IMPL.instance_add_security_group(context, instance_id,
security_group_id)
@@ -374,10 +391,12 @@ def network_count_reserved_ips(context, network_id):
def network_create_safe(context, values):
- """Create a network from the values dict
+ """Create a network from the values dict.
The network is only returned if the create succeeds. If the create violates
- constraints because the network already exists, no exception is raised."""
+ constraints because the network already exists, no exception is raised.
+
+ """
return IMPL.network_create_safe(context, values)
@@ -418,22 +437,22 @@ def network_get_by_instance(context, instance_id):
def network_get_index(context, network_id):
- """Get non-conflicting index for network"""
+ """Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id)
def network_get_vpn_ip(context, network_id):
- """Get non-conflicting index for network"""
+ """Get non-conflicting index for network."""
return IMPL.network_get_vpn_ip(context, network_id)
def network_set_cidr(context, network_id, cidr):
- """Set the Classless Inner Domain Routing for the network"""
+ """Set the Classless Inner Domain Routing for the network."""
return IMPL.network_set_cidr(context, network_id, cidr)
def network_set_host(context, network_id, host_id):
- """Safely set the host for network"""
+ """Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
@@ -479,7 +498,9 @@ def export_device_create_safe(context, values):
The device is not returned. If the create violates the unique
constraints because the shelf_id and blade_id already exist,
- no exception is raised."""
+ no exception is raised.
+
+ """
return IMPL.export_device_create_safe(context, values)
@@ -504,17 +525,17 @@ def iscsi_target_create_safe(context, values):
def auth_destroy_token(context, token):
- """Destroy an auth token"""
+ """Destroy an auth token."""
return IMPL.auth_destroy_token(context, token)
def auth_get_token(context, token_hash):
- """Retrieves a token given the hash representing it"""
+ """Retrieves a token given the hash representing it."""
return IMPL.auth_get_token(context, token_hash)
def auth_create_token(context, token):
- """Creates a new token"""
+ """Creates a new token."""
return IMPL.auth_create_token(context, token)
@@ -632,47 +653,47 @@ def volume_update(context, volume_id, values):
def security_group_get_all(context):
- """Get all security groups"""
+ """Get all security groups."""
return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id):
- """Get security group by its internal id"""
+ """Get security group by its internal id."""
return IMPL.security_group_get(context, security_group_id)
def security_group_get_by_name(context, project_id, group_name):
- """Returns a security group with the specified name from a project"""
+ """Returns a security group with the specified name from a project."""
return IMPL.security_group_get_by_name(context, project_id, group_name)
def security_group_get_by_project(context, project_id):
- """Get all security groups belonging to a project"""
+ """Get all security groups belonging to a project."""
return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_id):
- """Get security groups to which the instance is assigned"""
+ """Get security groups to which the instance is assigned."""
return IMPL.security_group_get_by_instance(context, instance_id)
def security_group_exists(context, project_id, group_name):
- """Indicates if a group name exists in a project"""
+ """Indicates if a group name exists in a project."""
return IMPL.security_group_exists(context, project_id, group_name)
def security_group_create(context, values):
- """Create a new security group"""
+ """Create a new security group."""
return IMPL.security_group_create(context, values)
def security_group_destroy(context, security_group_id):
- """Deletes a security group"""
+ """Deletes a security group."""
return IMPL.security_group_destroy(context, security_group_id)
def security_group_destroy_all(context):
- """Deletes a security group"""
+ """Deletes a security group."""
return IMPL.security_group_destroy_all(context)
@@ -680,18 +701,18 @@ def security_group_destroy_all(context):
def security_group_rule_create(context, values):
- """Create a new security group"""
+ """Create a new security group."""
return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id):
- """Get all rules for a a given security group"""
+ """Get all rules for a a given security group."""
return IMPL.security_group_rule_get_by_security_group(context,
security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
- """Deletes a security group rule"""
+ """Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
@@ -699,107 +720,107 @@ def security_group_rule_destroy(context, security_group_rule_id):
def user_get(context, id):
- """Get user by id"""
+ """Get user by id."""
return IMPL.user_get(context, id)
def user_get_by_uid(context, uid):
- """Get user by uid"""
+ """Get user by uid."""
return IMPL.user_get_by_uid(context, uid)
def user_get_by_access_key(context, access_key):
- """Get user by access key"""
+ """Get user by access key."""
return IMPL.user_get_by_access_key(context, access_key)
def user_create(context, values):
- """Create a new user"""
+ """Create a new user."""
return IMPL.user_create(context, values)
def user_delete(context, id):
- """Delete a user"""
+ """Delete a user."""
return IMPL.user_delete(context, id)
def user_get_all(context):
- """Create a new user"""
+ """Create a new user."""
return IMPL.user_get_all(context)
def user_add_role(context, user_id, role):
- """Add another global role for user"""
+ """Add another global role for user."""
return IMPL.user_add_role(context, user_id, role)
def user_remove_role(context, user_id, role):
- """Remove global role from user"""
+ """Remove global role from user."""
return IMPL.user_remove_role(context, user_id, role)
def user_get_roles(context, user_id):
- """Get global roles for user"""
+ """Get global roles for user."""
return IMPL.user_get_roles(context, user_id)
def user_add_project_role(context, user_id, project_id, role):
- """Add project role for user"""
+ """Add project role for user."""
return IMPL.user_add_project_role(context, user_id, project_id, role)
def user_remove_project_role(context, user_id, project_id, role):
- """Remove project role from user"""
+ """Remove project role from user."""
return IMPL.user_remove_project_role(context, user_id, project_id, role)
def user_get_roles_for_project(context, user_id, project_id):
- """Return list of roles a user holds on project"""
+ """Return list of roles a user holds on project."""
return IMPL.user_get_roles_for_project(context, user_id, project_id)
def user_update(context, user_id, values):
- """Update user"""
+ """Update user."""
return IMPL.user_update(context, user_id, values)
def project_get(context, id):
- """Get project by id"""
+ """Get project by id."""
return IMPL.project_get(context, id)
def project_create(context, values):
- """Create a new project"""
+ """Create a new project."""
return IMPL.project_create(context, values)
def project_add_member(context, project_id, user_id):
- """Add user to project"""
+ """Add user to project."""
return IMPL.project_add_member(context, project_id, user_id)
def project_get_all(context):
- """Get all projects"""
+ """Get all projects."""
return IMPL.project_get_all(context)
def project_get_by_user(context, user_id):
- """Get all projects of which the given user is a member"""
+ """Get all projects of which the given user is a member."""
return IMPL.project_get_by_user(context, user_id)
def project_remove_member(context, project_id, user_id):
- """Remove the given user from the given project"""
+ """Remove the given user from the given project."""
return IMPL.project_remove_member(context, project_id, user_id)
def project_update(context, project_id, values):
- """Update Remove the given user from the given project"""
+ """Update Remove the given user from the given project."""
return IMPL.project_update(context, project_id, values)
def project_delete(context, project_id):
- """Delete project"""
+ """Delete project."""
return IMPL.project_delete(context, project_id)
@@ -808,6 +829,7 @@ def project_delete(context, project_id):
def host_get_networks(context, host):
"""Return all networks for which the given host is the designated
- network host
+ network host.
+
"""
return IMPL.host_get_networks(context, host)
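
Sketch (illustrative, not part of the patch): callers never import the backend directly; nova.db.api forwards every call to the LazyPluggable backend selected by --db_backend (sqlalchemy is the only one wired up) over the --sql_connection database. The wrapper name below is made up; the db call itself appears in this file.

    # Illustrative sketch -- lookup_instance is made up.
    from nova import db

    def lookup_instance(context, internal_id):
        # Forwarded to the sqlalchemy backend by the IMPL dispatcher.
        return db.instance_get_by_internal_id(context, internal_id)
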
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index db4d9f68f..afa55fc03 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Implementation of SQLAlchemy backend
+Implementation of SQLAlchemy backend.
"""
import random
@@ -44,7 +44,6 @@ def is_admin_context(context):
warnings.warn('Use of empty request context is deprecated',
DeprecationWarning)
raise Exception('die')
- return True
return context.is_admin
@@ -390,7 +389,7 @@ def floating_ip_get_by_address(context, address, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound('No fixed ip for address %s' % address)
+ raise exception.NotFound('No floating ip for address %s' % address)
return result
@@ -502,14 +501,14 @@ def fixed_ip_get_by_address(context, address, session=None):
@require_context
def fixed_ip_get_instance(context, address):
- fixed_ip_ref = fixed_ip_get_by_address(context, address)
- return fixed_ip_ref.instance
+ fixed_ip_ref = fixed_ip_get_by_address(context, address)
+ return fixed_ip_ref.instance
@require_admin_context
def fixed_ip_get_network(context, address):
- fixed_ip_ref = fixed_ip_get_by_address(context, address)
- return fixed_ip_ref.network
+ fixed_ip_ref = fixed_ip_get_by_address(context, address)
+ return fixed_ip_ref.network
@require_context
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 1111b5cbd..01b5cf350 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -15,9 +15,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
-SQLAlchemy models for nova data
+SQLAlchemy models for nova data.
"""
import datetime
@@ -35,13 +34,13 @@ from nova import auth
from nova import exception
from nova import flags
-FLAGS = flags.FLAGS
+FLAGS = flags.FLAGS
BASE = declarative_base()
class NovaBase(object):
- """Base class for Nova Models"""
+ """Base class for Nova Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
created_at = Column(DateTime, default=datetime.datetime.utcnow)
@@ -50,7 +49,7 @@ class NovaBase(object):
deleted = Column(Boolean, default=False)
def save(self, session=None):
- """Save this object"""
+ """Save this object."""
if not session:
session = get_session()
session.add(self)
@@ -63,7 +62,7 @@ class NovaBase(object):
raise
def delete(self, session=None):
- """Delete this object"""
+ """Delete this object."""
self.deleted = True
self.deleted_at = datetime.datetime.utcnow()
self.save(session=session)
@@ -141,7 +140,8 @@ class NovaBase(object):
class Service(BASE, NovaBase):
- """Represents a running service on a host"""
+ """Represents a running service on a host."""
+
__tablename__ = 'services'
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
@@ -152,7 +152,7 @@ class Service(BASE, NovaBase):
class Instance(BASE, NovaBase):
- """Represents a guest vm"""
+ """Represents a guest vm."""
__tablename__ = 'instances'
id = Column(Integer, primary_key=True)
internal_id = Column(Integer, unique=True)
@@ -228,7 +228,7 @@ class Instance(BASE, NovaBase):
class Volume(BASE, NovaBase):
- """Represents a block storage device that can be attached to a vm"""
+ """Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
id = Column(Integer, primary_key=True)
ec2_id = Column(String(12), unique=True)
@@ -263,7 +263,7 @@ class Volume(BASE, NovaBase):
class Quota(BASE, NovaBase):
- """Represents quota overrides for a project"""
+ """Represents quota overrides for a project."""
__tablename__ = 'quotas'
id = Column(Integer, primary_key=True)
@@ -277,7 +277,7 @@ class Quota(BASE, NovaBase):
class ExportDevice(BASE, NovaBase):
- """Represates a shelf and blade that a volume can be exported on"""
+ """Represates a shelf and blade that a volume can be exported on."""
__tablename__ = 'export_devices'
__table_args__ = (schema.UniqueConstraint("shelf_id", "blade_id"),
{'mysql_engine': 'InnoDB'})
@@ -316,7 +316,7 @@ class SecurityGroupInstanceAssociation(BASE, NovaBase):
class SecurityGroup(BASE, NovaBase):
- """Represents a security group"""
+ """Represents a security group."""
__tablename__ = 'security_groups'
id = Column(Integer, primary_key=True)
@@ -346,7 +346,7 @@ class SecurityGroup(BASE, NovaBase):
class SecurityGroupIngressRule(BASE, NovaBase):
- """Represents a rule in a security group"""
+ """Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
id = Column(Integer, primary_key=True)
@@ -368,7 +368,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
class KeyPair(BASE, NovaBase):
- """Represents a public key pair for ssh"""
+ """Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
id = Column(Integer, primary_key=True)
@@ -381,7 +381,7 @@ class KeyPair(BASE, NovaBase):
class Network(BASE, NovaBase):
- """Represents a network"""
+ """Represents a network."""
__tablename__ = 'networks'
__table_args__ = (schema.UniqueConstraint("vpn_public_address",
"vpn_public_port"),
@@ -410,9 +410,12 @@ class Network(BASE, NovaBase):
class AuthToken(BASE, NovaBase):
- """Represents an authorization token for all API transactions. Fields
- are a string representing the actual token and a user id for mapping
- to the actual user"""
+ """Represents an authorization token for all API transactions.
+
+ Fields are a string representing the actual token and a user id for
+ mapping to the actual user
+
+ """
__tablename__ = 'auth_tokens'
token_hash = Column(String(255), primary_key=True)
user_id = Column(Integer)
@@ -423,7 +426,7 @@ class AuthToken(BASE, NovaBase):
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
- """Represents a fixed ip for an instance"""
+ """Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
@@ -442,7 +445,7 @@ class FixedIp(BASE, NovaBase):
class User(BASE, NovaBase):
- """Represents a user"""
+ """Represents a user."""
__tablename__ = 'users'
id = Column(String(255), primary_key=True)
@@ -454,7 +457,7 @@ class User(BASE, NovaBase):
class Project(BASE, NovaBase):
- """Represents a project"""
+ """Represents a project."""
__tablename__ = 'projects'
id = Column(String(255), primary_key=True)
name = Column(String(255))
@@ -502,7 +505,7 @@ class UserProjectAssociation(BASE, NovaBase):
class FloatingIp(BASE, NovaBase):
- """Represents a floating ip that dynamically forwards to a fixed ip"""
+ """Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
@@ -518,7 +521,11 @@ class FloatingIp(BASE, NovaBase):
def register_models():
- """Register Models and create metadata"""
+ """Register Models and create metadata.
+
+ Called from nova.db.sqlalchemy.__init__ as part of loading the driver,
+ it will never need to be called explicitly elsewhere.
+ """
from sqlalchemy import create_engine
models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp,
FloatingIp, Network, SecurityGroup,
diff --git a/nova/flags.py b/nova/flags.py
index 2b8bbbdb7..70a049491 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -194,6 +194,8 @@ DEFINE_list('region_list',
[],
'list of region=url pairs separated by commas')
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
+DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
+DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
@@ -211,6 +213,8 @@ DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
+DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
+DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
@@ -250,7 +254,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.service.LocalImageService',
+DEFINE_string('image_service', 'nova.image.local.LocalImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),
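
Sketch (illustrative, not part of the patch): with the default image_service flag now pointing at the relocated LocalImageService, services keep loading the image backend indirectly. A minimal sketch, assuming the flags have already been parsed.

    # Illustrative sketch -- not part of this commit.
    from nova import flags
    from nova import utils

    FLAGS = flags.FLAGS

    # Defaults to 'nova.image.local.LocalImageService'; the other backends
    # can be selected with --image_service=nova.image.s3.S3ImageService or
    # --image_service=nova.image.glance.GlanceImageService.
    image_service = utils.import_object(FLAGS.image_service)
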
diff --git a/nova/image/services/glance/__init__.py b/nova/image/glance.py
index f1d05f0bc..1ca6cf2eb 100644
--- a/nova/image/services/glance/__init__.py
+++ b/nova/image/glance.py
@@ -19,6 +19,7 @@
import httplib
import json
+import logging
import urlparse
import webob.exc
@@ -31,6 +32,17 @@ import nova.image.service
FLAGS = flags.FLAGS
+flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
+ 'IP address or URL where Glance\'s Teller service resides')
+flags.DEFINE_string('glance_teller_port', '9191',
+ 'Port for Glance\'s Teller service')
+flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1',
+ 'IP address or URL where Glance\'s Parallax service '
+ 'resides')
+flags.DEFINE_string('glance_parallax_port', '9292',
+ 'Port for Glance\'s Parallax service')
+
+
class TellerClient(object):
def __init__(self):
@@ -160,21 +172,21 @@ class GlanceImageService(nova.image.service.BaseImageService):
self.teller = TellerClient()
self.parallax = ParallaxClient()
- def index(self):
+ def index(self, context):
"""
Calls out to Parallax for a list of images available
"""
images = self.parallax.get_image_index()
return images
- def detail(self):
+ def detail(self, context):
"""
Calls out to Parallax for a list of detailed image information
"""
images = self.parallax.get_image_details()
return images
- def show(self, id):
+ def show(self, context, id):
"""
Returns a dict containing image data for the given opaque image id.
"""
@@ -183,7 +195,7 @@ class GlanceImageService(nova.image.service.BaseImageService):
return image
raise exception.NotFound
- def create(self, data):
+ def create(self, context, data):
"""
Store the image data and return the new image id.
@@ -192,7 +204,7 @@ class GlanceImageService(nova.image.service.BaseImageService):
"""
return self.parallax.add_image_metadata(data)
- def update(self, image_id, data):
+ def update(self, context, image_id, data):
"""Replace the contents of the given image with the new data.
:raises NotFound if the image does not exist.
@@ -200,7 +212,7 @@ class GlanceImageService(nova.image.service.BaseImageService):
"""
self.parallax.update_image_metadata(image_id, data)
- def delete(self, image_id):
+ def delete(self, context, image_id):
"""
Delete the given image.
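
Usage sketch (illustrative, not part of the patch): like the other backends, every GlanceImageService method now takes the request context as its first argument. The snippet assumes Teller/Parallax are reachable at the new glance_* flag defaults, that Parallax returns the id/name mappings described by the base interface, and that the user/project ids are made up.

    # Illustrative sketch -- requires running Glance Teller/Parallax services.
    from nova import context
    from nova.image.glance import GlanceImageService

    glance = GlanceImageService()
    ctxt = context.RequestContext('admin', 'admin')
    for image in glance.index(ctxt):
        print image['id'], image['name']
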
diff --git a/nova/image/local.py b/nova/image/local.py
new file mode 100644
index 000000000..9b0cdcc50
--- /dev/null
+++ b/nova/image/local.py
@@ -0,0 +1,88 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import cPickle as pickle
+import os.path
+import random
+
+from nova import exception
+from nova.image import service
+
+
+class LocalImageService(service.BaseImageService):
+
+ """Image service storing images to local disk.
+
+ It assumes that image_ids are integers."""
+
+ def __init__(self):
+ self._path = "/tmp/nova/images"
+ try:
+ os.makedirs(self._path)
+ except OSError: # Exists
+ pass
+
+ def _path_to(self, image_id):
+ return os.path.join(self._path, str(image_id))
+
+ def _ids(self):
+ """The list of all image ids."""
+ return [int(i) for i in os.listdir(self._path)]
+
+ def index(self, context):
+ return [dict(id=i['id'], name=i['name']) for i in self.detail(context)]
+
+ def detail(self, context):
+ return [self.show(context, id) for id in self._ids()]
+
+ def show(self, context, id):
+ try:
+ return pickle.load(open(self._path_to(id)))
+ except IOError:
+ raise exception.NotFound
+
+ def create(self, context, data):
+ """
+ Store the image data and return the new image id.
+ """
+ id = random.randint(0, 2 ** 32 - 1)
+ data['id'] = id
+ self.update(context, id, data)
+ return id
+
+ def update(self, context, image_id, data):
+ """Replace the contents of the given image with the new data."""
+ try:
+ pickle.dump(data, open(self._path_to(image_id), 'w'))
+ except IOError:
+ raise exception.NotFound
+
+ def delete(self, context, image_id):
+ """
+ Delete the given image. Raises OSError if the image does not exist.
+ """
+ try:
+ os.unlink(self._path_to(image_id))
+ except IOError:
+ raise exception.NotFound
+
+ def delete_all(self):
+ """
+ Clears out all images in local directory
+ """
+ for id in self._ids():
+ os.unlink(self._path_to(id))
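
Usage sketch (illustrative, not part of the patch): the relocated LocalImageService keeps its pickle-on-disk behaviour but now accepts a context on every call to match the other backends (the context is accepted and ignored here). The user/project ids below are made up.

    # Illustrative sketch -- not part of this commit.
    from nova import context
    from nova.image.local import LocalImageService

    service = LocalImageService()          # images live under /tmp/nova/images
    ctxt = context.RequestContext('admin', 'admin')

    image_id = service.create(ctxt, {'name': 'test-image'})
    print service.show(ctxt, image_id)     # {'name': 'test-image', 'id': <image_id>}
    service.delete(ctxt, image_id)
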
diff --git a/nova/image/s3.py b/nova/image/s3.py
new file mode 100644
index 000000000..0a25161de
--- /dev/null
+++ b/nova/image/s3.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Proxy AMI-related calls from the cloud controller, to the running
+objectstore service.
+"""
+
+import json
+import urllib
+
+import boto.s3.connection
+
+from nova import exception
+from nova import flags
+from nova import utils
+from nova.auth import manager
+from nova.image import service
+
+
+FLAGS = flags.FLAGS
+
+
+class S3ImageService(service.BaseImageService):
+
+ def modify(self, context, image_id, operation):
+ self._conn(context).make_request(
+ method='POST',
+ bucket='_images',
+ query_args=self._qs({'image_id': image_id,
+ 'operation': operation}))
+ return True
+
+ def update(self, context, image_id, attributes):
+ """update an image's attributes / info.json"""
+ attributes.update({"image_id": image_id})
+ self._conn(context).make_request(
+ method='POST',
+ bucket='_images',
+ query_args=self._qs(attributes))
+ return True
+
+ def register(self, context, image_location):
+ """ rpc call to register a new image based from a manifest """
+ image_id = utils.generate_uid('ami')
+ self._conn(context).make_request(
+ method='PUT',
+ bucket='_images',
+ query_args=self._qs({'image_location': image_location,
+ 'image_id': image_id}))
+ return image_id
+
+ def index(self, context):
+ """Return a list of all images that a user can see."""
+ response = self._conn(context).make_request(
+ method='GET',
+ bucket='_images')
+ return json.loads(response.read())
+
+ def show(self, context, image_id):
+ """return a image object if the context has permissions"""
+ if FLAGS.connection_type == 'fake':
+ return {'imageId': 'bar'}
+ result = self.index(context)
+ result = [i for i in result if i['imageId'] == image_id]
+ if not result:
+ raise exception.NotFound('Image %s could not be found' % image_id)
+ image = result[0]
+ return image
+
+ def deregister(self, context, image_id):
+ """ unregister an image """
+ self._conn(context).make_request(
+ method='DELETE',
+ bucket='_images',
+ query_args=self._qs({'image_id': image_id}))
+
+ def _conn(self, context):
+ access = manager.AuthManager().get_access_key(context.user,
+ context.project)
+ secret = str(context.user.secret)
+ calling = boto.s3.connection.OrdinaryCallingFormat()
+ return boto.s3.connection.S3Connection(aws_access_key_id=access,
+ aws_secret_access_key=secret,
+ is_secure=False,
+ calling_format=calling,
+ port=FLAGS.s3_port,
+ host=FLAGS.s3_host)
+
+ def _qs(self, params):
+ pairs = []
+ for key in params.keys():
+ pairs.append(key + '=' + urllib.quote(params[key]))
+ return '&'.join(pairs)
diff --git a/nova/image/service.py b/nova/image/service.py
index 37cadddcc..ebee2228d 100644
--- a/nova/image/service.py
+++ b/nova/image/service.py
@@ -15,64 +15,38 @@
# License for the specific language governing permissions and limitations
# under the License.
-import cPickle as pickle
-import os.path
-import random
-
-from nova import flags
-from nova import exception
-
-FLAGS = flags.FLAGS
-
-
-flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
- 'IP address or URL where Glance\'s Teller service resides')
-flags.DEFINE_string('glance_teller_port', '9191',
- 'Port for Glance\'s Teller service')
-flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1',
- 'IP address or URL where Glance\'s Parallax service '
- 'resides')
-flags.DEFINE_string('glance_parallax_port', '9292',
- 'Port for Glance\'s Parallax service')
-
class BaseImageService(object):
"""Base class for providing image search and retrieval services"""
- def index(self):
+ def index(self, context):
"""
Returns a sequence of mappings of id and name information about
images.
- :retval a sequence of mappings with the following signature:
-
- [
- {'id': opaque id of image,
- 'name': name of image
- }, ...
- ]
+ :rtype: array
+ :retval: a sequence of mappings with the following signature
+ {'id': opaque id of image, 'name': name of image}
"""
raise NotImplementedError
- def detail(self):
+ def detail(self, context):
"""
Returns a sequence of mappings of detailed information about images.
- :retval a sequence of mappings with the following signature:
-
- [
- {'id': opaque id of image,
- 'name': name of image,
- 'created_at': creation timestamp,
- 'updated_at': modification timestamp,
- 'deleted_at': deletion timestamp or None,
- 'deleted': boolean indicating if image has been deleted,
- 'status': string description of image status,
- 'is_public': boolean indicating if image is public
- }, ...
- ]
+ :rtype: array
+ :retval: a sequence of mappings with the following signature
+ {'id': opaque id of image,
+ 'name': name of image,
+ 'created_at': creation timestamp,
+ 'updated_at': modification timestamp,
+ 'deleted_at': deletion timestamp or None,
+ 'deleted': boolean indicating if image has been deleted,
+ 'status': string description of image status,
+ 'is_public': boolean indicating if image is public
+ }
If the service does not implement a method that provides a detailed
set of information about images, then the method should raise
@@ -82,7 +56,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def show(self, id):
+ def show(self, context, id):
"""
Returns a dict containing image data for the given opaque image id.
@@ -102,7 +76,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def create(self, data):
+ def create(self, context, data):
"""
Store the image data and return the new image id.
@@ -111,7 +85,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def update(self, image_id, data):
+ def update(self, context, image_id, data):
"""Replace the contents of the given image with the new data.
:raises NotFound if the image does not exist.
@@ -119,7 +93,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def delete(self, image_id):
+ def delete(self, context, image_id):
"""
Delete the given image.
@@ -127,68 +101,3 @@ class BaseImageService(object):
"""
raise NotImplementedError
-
-
-class LocalImageService(BaseImageService):
-
- """Image service storing images to local disk.
-
- It assumes that image_ids are integers."""
-
- def __init__(self):
- self._path = "/tmp/nova/images"
- try:
- os.makedirs(self._path)
- except OSError: # Exists
- pass
-
- def _path_to(self, image_id):
- return os.path.join(self._path, str(image_id))
-
- def _ids(self):
- """The list of all image ids."""
- return [int(i) for i in os.listdir(self._path)]
-
- def index(self):
- return [dict(id=i['id'], name=i['name']) for i in self.detail()]
-
- def detail(self):
- return [self.show(id) for id in self._ids()]
-
- def show(self, id):
- try:
- return pickle.load(open(self._path_to(id)))
- except IOError:
- raise exception.NotFound
-
- def create(self, data):
- """
- Store the image data and return the new image id.
- """
- id = random.randint(0, 2 ** 32 - 1)
- data['id'] = id
- self.update(id, data)
- return id
-
- def update(self, image_id, data):
- """Replace the contents of the given image with the new data."""
- try:
- pickle.dump(data, open(self._path_to(image_id), 'w'))
- except IOError:
- raise exception.NotFound
-
- def delete(self, image_id):
- """
- Delete the given image. Raises OSError if the image does not exist.
- """
- try:
- os.unlink(self._path_to(image_id))
- except IOError:
- raise exception.NotFound
-
- def delete_all(self):
- """
- Clears out all images in local directory
- """
- for id in self._ids():
- os.unlink(self._path_to(id))
diff --git a/nova/image/services/__init__.py b/nova/image/services/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/nova/image/services/__init__.py
+++ /dev/null
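
Every BaseImageService method now takes a request context as its first
argument. A minimal in-memory subclass, shown purely to illustrate the new
signatures (MemoryImageService is hypothetical and not part of the patch;
exception.NotFound and BaseImageService are from the tree):

    from nova import exception
    from nova.image import service


    class MemoryImageService(service.BaseImageService):
        """Hypothetical in-memory image service illustrating the new API."""

        def __init__(self):
            self._images = {}

        def index(self, context):
            return [dict(id=i['id'], name=i.get('name'))
                    for i in self._images.values()]

        def show(self, context, id):
            try:
                return self._images[id]
            except KeyError:
                raise exception.NotFound

        def create(self, context, data):
            image_id = len(self._images) + 1
            data['id'] = image_id
            self._images[image_id] = data
            return image_id
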
diff --git a/nova/manager.py b/nova/manager.py
index 4244b2db4..a6efb8732 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -15,8 +15,40 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
"""
-Base class for managers of different parts of the system
+Managers are responsible for a certain aspect of the system: each one is a
+logical grouping of code relating to a portion of the system. In general,
+other components should use the manager to make changes to the components
+that it is responsible for.
+
+For example, other components that need to deal with volumes in some way,
+should do so by calling methods on the VolumeManager instead of directly
+changing fields in the database. This allows us to keep all of the code
+relating to volumes in the same place.
+
+We have adopted a basic strategy of Smart managers and dumb data, which means
+rather than attaching methods to data objects, components should call manager
+methods that act on the data.
+
+Methods on managers that can be executed locally should be called directly. If
+a particular method must execute on a remote host, this should be done via rpc
+to the service that wraps the manager.
+
+Managers should be responsible for most of the db access, and
+non-implementation specific data. Anything implementation specific that can't
+be generalized should be done by the Driver.
+
+In general, we prefer to have one manager with multiple drivers for different
+implementations, but sometimes it makes sense to have multiple managers. You
+can think of it this way: abstract different overall strategies at the manager
+level (FlatNetwork vs VlanNetwork), and different implementations at the
+driver level (LinuxNetDriver vs CiscoNetDriver).
+
+Managers will often provide methods for initial setup of a host or periodic
+tasks to a wrapping service.
+
+This module provides Manager, a base class for managers.
"""
from nova import utils
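
A rough sketch of the manager/driver split described in the docstring above.
ExampleManager is hypothetical; utils.import_object, FakeAOEDriver and
check_for_setup_error are real names from the tree, but the wiring here is
only an illustration of the pattern:

    from nova import utils


    class ExampleManager(object):
        """Hypothetical manager: generic logic lives here, implementation
        specific work is delegated to a driver object loaded by name."""

        def __init__(self, driver_name='nova.volume.driver.FakeAOEDriver'):
            self.driver = utils.import_object(driver_name)

        def do_setup_check(self, context):
            # db access and other generic bookkeeping would go here; the
            # driver handles the implementation-specific part.
            return self.driver.check_for_setup_error()
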
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index f504b3d29..7b00e65d4 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -42,8 +42,8 @@ flags.DEFINE_string('networks_path', '$state_path/networks',
'Location to keep network config files')
flags.DEFINE_string('public_interface', 'vlan1',
'Interface for public IP addresses')
-flags.DEFINE_string('bridge_dev', 'eth0',
- 'network device for bridges')
+flags.DEFINE_string('vlan_interface', 'eth0',
+ 'network device for vlans')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
'location of nova-dhcpbridge')
flags.DEFINE_string('routing_source_ip', '127.0.0.1',
@@ -54,14 +54,15 @@ flags.DEFINE_bool('use_nova_chains', False,
DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
-def init_host():
- """Basic networking setup goes here"""
- # NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port
- # forwarding entries and a default DNAT entry.
+def metadata_forward():
+ """Create forwarding rule for metadata"""
_confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 "
"-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT "
"--to-destination %s:%s" % (FLAGS.cc_host, FLAGS.cc_port))
+
+def init_host():
+ """Basic networking setup goes here"""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
_confirm_rule("POSTROUTING", "-t nat -s %s "
@@ -134,7 +135,7 @@ def ensure_vlan(vlan_num):
if not _device_exists(interface):
logging.debug("Starting VLAN inteface %s", interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
- _execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, vlan_num))
+ _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
_execute("sudo ifconfig %s up" % interface)
return interface
@@ -142,12 +143,13 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists"""
if not _device_exists(bridge):
- logging.debug("Starting Bridge inteface for %s", interface)
+ logging.debug("Starting Bridge interface for %s", interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
_execute("sudo brctl stp %s off" % bridge)
- _execute("sudo brctl addif %s %s" % (bridge, interface))
+ if interface:
+ _execute("sudo brctl addif %s %s" % (bridge, interface))
if net_attrs:
_execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
(bridge,
@@ -244,7 +246,7 @@ def _confirm_rule(chain, cmd):
def _remove_rule(chain, cmd):
"""Remove iptables rule"""
if FLAGS.use_nova_chains:
- chain = "%S" % chain.lower()
+ chain = "%s" % chain.lower()
_execute("sudo iptables --delete %s %s" % (chain, cmd))
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 8a20cb491..a7298b47f 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -17,7 +17,31 @@
# under the License.
"""
-Network Hosts are responsible for allocating ips and setting up network
+Network Hosts are responsible for allocating ips and setting up networking.
+
+There are multiple backend drivers that handle specific types of networking
+topologies. All of the network commands are issued to a subclass of
+:class:`NetworkManager`.
+
+**Related Flags**
+
+:network_driver: Driver to use for network creation
+:flat_network_bridge: Bridge device for simple network instances
+:flat_interface: FlatDhcp will bridge into this interface if set
+:flat_network_dns: Dns for simple network
+:flat_network_dhcp_start: Dhcp start for FlatDhcp
+:vlan_start: First VLAN for private networks
+:vpn_ip: Public IP for the cloudpipe VPN servers
+:vpn_start: First Vpn port for private networks
+:cnt_vpn_clients: Number of addresses reserved for vpn clients
+:network_size: Number of addresses in each private subnet
+:floating_range: Floating IP address block
+:fixed_range: Fixed IP address block
+:update_dhcp_on_disassociate: Whether to update dhcp when fixed_ip
+                              is disassociated
+:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
+ is disassociated
+
"""
import datetime
@@ -40,7 +64,11 @@ flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
flags.DEFINE_string('flat_network_dns', '8.8.4.4',
'Dns for simple network')
-flags.DEFINE_string('flat_network_dhcp_start', '192.168.0.2',
+flags.DEFINE_bool('flat_injected', True,
+ 'Whether to attempt to inject network setup into guest')
+flags.DEFINE_string('flat_interface', None,
+ 'FlatDhcp will bridge into this interface if set')
+flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2',
'Dhcp start for FlatDhcp')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support')
@@ -63,15 +91,16 @@ flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
class AddressAlreadyAllocated(exception.Error):
- """Address was already allocated"""
+ """Address was already allocated."""
pass
class NetworkManager(manager.Manager):
- """Implements common network manager functionality
+ """Implements common network manager functionality.
- This class must be subclassed.
+ This class must be subclassed to support specific topologies.
"""
+
def __init__(self, network_driver=None, *args, **kwargs):
if not network_driver:
network_driver = FLAGS.network_driver
@@ -86,7 +115,7 @@ class NetworkManager(manager.Manager):
self._on_set_network_host(ctxt, network['id'])
def set_network_host(self, context, network_id):
- """Safely sets the host of the network"""
+ """Safely sets the host of the network."""
logging.debug("setting network host")
host = self.db.network_set_host(context,
network_id,
@@ -95,34 +124,34 @@ class NetworkManager(manager.Manager):
return host
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool"""
+ """Gets a fixed ip from the pool."""
raise NotImplementedError()
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool"""
+ """Returns a fixed ip to the pool."""
raise NotImplementedError()
def setup_fixed_ip(self, context, address):
- """Sets up rules for fixed ip"""
+ """Sets up rules for fixed ip."""
raise NotImplementedError()
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a network"""
+ """Called when this host becomes the host for a network."""
raise NotImplementedError()
def setup_compute_network(self, context, instance_id):
- """Sets up matching network for compute hosts"""
+ """Sets up matching network for compute hosts."""
raise NotImplementedError()
def allocate_floating_ip(self, context, project_id):
- """Gets an floating ip from the pool"""
+ """Gets an floating ip from the pool."""
# TODO(vish): add floating ips through manage command
return self.db.floating_ip_allocate_address(context,
self.host,
project_id)
def associate_floating_ip(self, context, floating_address, fixed_address):
- """Associates an floating ip to a fixed ip"""
+ """Associates an floating ip to a fixed ip."""
self.db.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address)
@@ -130,18 +159,18 @@ class NetworkManager(manager.Manager):
self.driver.ensure_floating_forward(floating_address, fixed_address)
def disassociate_floating_ip(self, context, floating_address):
- """Disassociates a floating ip"""
+ """Disassociates a floating ip."""
fixed_address = self.db.floating_ip_disassociate(context,
floating_address)
self.driver.unbind_floating_ip(floating_address)
self.driver.remove_floating_forward(floating_address, fixed_address)
def deallocate_floating_ip(self, context, floating_address):
- """Returns an floating ip to the pool"""
+ """Returns an floating ip to the pool."""
self.db.floating_ip_deallocate(context, floating_address)
def lease_fixed_ip(self, context, mac, address):
- """Called by dhcp-bridge when ip is leased"""
+ """Called by dhcp-bridge when ip is leased."""
logging.debug("Leasing IP %s", address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
@@ -151,14 +180,16 @@ class NetworkManager(manager.Manager):
if instance_ref['mac_address'] != mac:
raise exception.Error("IP %s leased to bad mac %s vs %s" %
(address, instance_ref['mac_address'], mac))
+ now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
- {'leased': True})
+ {'leased': True,
+ 'updated_at': now})
if not fixed_ip_ref['allocated']:
logging.warn("IP %s leased that was already deallocated", address)
def release_fixed_ip(self, context, mac, address):
- """Called by dhcp-bridge when ip is released"""
+ """Called by dhcp-bridge when ip is released."""
logging.debug("Releasing IP %s", address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
@@ -183,26 +214,26 @@ class NetworkManager(manager.Manager):
self.driver.update_dhcp(context, network_ref['id'])
def get_network(self, context):
- """Get the network for the current context"""
+ """Get the network for the current context."""
raise NotImplementedError()
def create_networks(self, context, num_networks, network_size,
*args, **kwargs):
- """Create networks based on parameters"""
+ """Create networks based on parameters."""
raise NotImplementedError()
@property
def _bottom_reserved_ips(self): # pylint: disable-msg=R0201
- """Number of reserved ips at the bottom of the range"""
+ """Number of reserved ips at the bottom of the range."""
return 2 # network, gateway
@property
def _top_reserved_ips(self): # pylint: disable-msg=R0201
- """Number of reserved ips at the top of the range"""
+ """Number of reserved ips at the top of the range."""
return 1 # broadcast
def _create_fixed_ips(self, context, network_id):
- """Create all fixed ips for network"""
+ """Create all fixed ips for network."""
network_ref = self.db.network_get(context, network_id)
# NOTE(vish): Should these be properties of the network as opposed
# to properties of the manager class?
@@ -222,10 +253,34 @@ class NetworkManager(manager.Manager):
class FlatManager(NetworkManager):
- """Basic network where no vlans are used"""
+ """Basic network where no vlans are used.
+
+ FlatManager does not do any bridge or vlan creation. The user is
+ responsible for setting up whatever bridge is specified in
+ flat_network_bridge (br100 by default). This bridge needs to be created
+ on all compute hosts.
+
+ The idea is to create a single network for the host with a command like:
+ nova-manage network create 192.168.0.0/24 1 256. Creating multiple
+    networks for one manager is currently not supported, but could be
+    added by modifying allocate_fixed_ip and get_network to get a network
+ with new logic instead of network_get_by_bridge. Arbitrary lists of
+ addresses in a single network can be accomplished with manual db editing.
+
+ If flat_injected is True, the compute host will attempt to inject network
+ config into the guest. It attempts to modify /etc/network/interfaces and
+ currently only works on debian based systems. To support a wider range of
+ OSes, some other method may need to be devised to let the guest know which
+ ip it should be using so that it can configure itself. Perhaps an attached
+ disk or serial device with configuration info.
+
+ Metadata forwarding must be handled by the gateway, and since nova does
+ not do any setup in this mode, it must be done manually. Requests to
+ 169.254.169.254 port 80 will need to be forwarded to the api server.
+ """
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool"""
+ """Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
@@ -239,21 +294,21 @@ class FlatManager(NetworkManager):
return address
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool"""
+ """Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
self.db.fixed_ip_disassociate(context.elevated(), address)
def setup_compute_network(self, context, instance_id):
- """Network is created manually"""
+ """Network is created manually."""
pass
def setup_fixed_ip(self, context, address):
- """Currently no setup"""
+ """Currently no setup."""
pass
def create_networks(self, context, cidr, num_networks, network_size,
*args, **kwargs):
- """Create networks based on parameters"""
+ """Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
for index in range(num_networks):
start = index * network_size
@@ -261,6 +316,7 @@ class FlatManager(NetworkManager):
cidr = "%s/%s" % (fixed_net[start], significant_bits)
project_net = IPy.IP(cidr)
net = {}
+ net['bridge'] = FLAGS.flat_network_bridge
net['cidr'] = cidr
net['netmask'] = str(project_net.netmask())
net['gateway'] = str(project_net[1])
@@ -271,7 +327,7 @@ class FlatManager(NetworkManager):
self._create_fixed_ips(context, network_ref['id'])
def get_network(self, context):
- """Get the network for the current context"""
+ """Get the network for the current context."""
# NOTE(vish): To support mutilple network hosts, This could randomly
# select from multiple networks instead of just
# returning the one. It could also potentially be done
@@ -280,44 +336,72 @@ class FlatManager(NetworkManager):
FLAGS.flat_network_bridge)
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a network"""
+ """Called when this host becomes the host for a network."""
net = {}
- net['injected'] = True
- net['bridge'] = FLAGS.flat_network_bridge
+ net['injected'] = FLAGS.flat_injected
net['dns'] = FLAGS.flat_network_dns
self.db.network_update(context, network_id, net)
-class FlatDHCPManager(NetworkManager):
- """Flat networking with dhcp"""
+class FlatDHCPManager(FlatManager):
+ """Flat networking with dhcp.
+
+ FlatDHCPManager will start up one dhcp server to give out addresses.
+ It never injects network settings into the guest. Otherwise it behaves
+    like FlatManager.
+ """
+
+ def init_host(self):
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+ super(FlatDHCPManager, self).init_host()
+ self.driver.metadata_forward()
+
+ def setup_compute_network(self, context, instance_id):
+ """Sets up matching network for compute hosts."""
+ network_ref = db.network_get_by_instance(context, instance_id)
+ self.driver.ensure_bridge(network_ref['bridge'],
+ FLAGS.flat_interface,
+ network_ref)
def setup_fixed_ip(self, context, address):
- """Setup dhcp for this network"""
- network_ref = db.fixed_ip_get_by_address(context, address)
+ """Setup dhcp for this network."""
+ network_ref = db.fixed_ip_get_network(context, address)
self.driver.update_dhcp(context, network_ref['id'])
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool"""
+ """Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a project"""
- super(FlatDHCPManager, self)._on_set_network_host(context, network_id)
- network_ref = self.db.network_get(context, network_id)
- self.db.network_update(context,
- network_id,
- {'dhcp_start': FLAGS.flat_network_dhcp_start})
+ """Called when this host becomes the host for a project."""
+ net = {}
+ net['dhcp_start'] = FLAGS.flat_network_dhcp_start
+ self.db.network_update(context, network_id, net)
+ network_ref = db.network_get(context, network_id)
self.driver.ensure_bridge(network_ref['bridge'],
- FLAGS.bridge_dev,
+ FLAGS.flat_interface,
network_ref)
class VlanManager(NetworkManager):
- """Vlan network with dhcp"""
+ """Vlan network with dhcp.
+
+ VlanManager is the most complicated. It will create a host-managed
+ vlan for each project. Each project gets its own subnet. The networks
+ and associated subnets are created with nova-manage using a command like:
+ nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks
+ of 16 addresses from the beginning of the 10.0.0.0 range.
+
+ A dhcp server is run for each subnet, so each project will have its own.
+ For this mode to be useful, each project will need a vpn to access the
+ instances in its subnet.
+ """
@defer.inlineCallbacks
def periodic_tasks(self, context=None):
- """Tasks to be run at a periodic interval"""
+ """Tasks to be run at a periodic interval."""
yield super(VlanManager, self).periodic_tasks(context)
now = datetime.datetime.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
@@ -330,13 +414,14 @@ class VlanManager(NetworkManager):
def init_host(self):
"""Do any initialization that needs to be run if this is a
- standalone service.
+ standalone service.
"""
super(VlanManager, self).init_host()
+ self.driver.metadata_forward()
self.driver.init_host()
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool"""
+ """Gets a fixed ip from the pool."""
# TODO(vish): This should probably be getting project_id from
# the instance, but it is another trip to the db.
# Perhaps this method should take an instance_ref.
@@ -356,11 +441,11 @@ class VlanManager(NetworkManager):
return address
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool"""
+ """Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
def setup_fixed_ip(self, context, address):
- """Sets forwarding rules and dhcp for fixed ip"""
+ """Sets forwarding rules and dhcp for fixed ip."""
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
network_ref = self.db.fixed_ip_get_network(context, address)
if self.db.instance_is_vpn(context, fixed_ip_ref['instance_id']):
@@ -370,19 +455,19 @@ class VlanManager(NetworkManager):
self.driver.update_dhcp(context, network_ref['id'])
def setup_compute_network(self, context, instance_id):
- """Sets up matching network for compute hosts"""
+ """Sets up matching network for compute hosts."""
network_ref = db.network_get_by_instance(context, instance_id)
self.driver.ensure_vlan_bridge(network_ref['vlan'],
network_ref['bridge'])
def restart_nets(self):
- """Ensure the network for each user is enabled"""
+ """Ensure the network for each user is enabled."""
# TODO(vish): Implement this
pass
def create_networks(self, context, cidr, num_networks, network_size,
vlan_start, vpn_start):
- """Create networks based on parameters"""
+ """Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
for index in range(num_networks):
vlan = vlan_start + index
@@ -407,12 +492,12 @@ class VlanManager(NetworkManager):
self._create_fixed_ips(context, network_ref['id'])
def get_network(self, context):
- """Get the network for the current context"""
+ """Get the network for the current context."""
return self.db.project_get_network(context.elevated(),
context.project_id)
def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a network"""
+ """Called when this host becomes the host for a network."""
network_ref = self.db.network_get(context, network_id)
net = {}
net['vpn_public_address'] = FLAGS.vpn_ip
@@ -424,11 +509,11 @@ class VlanManager(NetworkManager):
@property
def _bottom_reserved_ips(self):
- """Number of reserved ips at the bottom of the range"""
+ """Number of reserved ips at the bottom of the range."""
return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server
@property
def _top_reserved_ips(self):
- """Number of reserved ips at the top of the range"""
+ """Number of reserved ips at the top of the range."""
parent_reserved = super(VlanManager, self)._top_reserved_ips
return parent_reserved + FLAGS.cnt_vpn_clients
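
The subnet carving described in the FlatManager and VlanManager docstrings
(e.g. nova-manage network create 10.0.0.0/8 3 16) amounts to slicing the
fixed range with IPy. A standalone sketch, with the prefix computation
written inline since it is not shown in this hunk:

    import math

    import IPy

    cidr = '10.0.0.0/8'      # example arguments, as in the VlanManager docstring
    num_networks = 3
    network_size = 16

    fixed_net = IPy.IP(cidr)
    prefix = 32 - int(math.log(network_size, 2))    # 16 addresses -> /28
    for index in range(num_networks):
        start = index * network_size
        project_net = IPy.IP('%s/%s' % (fixed_net[start], prefix))
        # network, gateway and broadcast, as used when building the net dict
        print project_net, project_net[1], project_net.broadcast()
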
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index fce3ec27b..82767e52f 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -78,8 +78,8 @@ class Bucket(object):
path = os.path.abspath(os.path.join(
FLAGS.buckets_path, bucket_name))
if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- os.path.exists(path):
- raise exception.NotAuthorized()
+ os.path.exists(path):
+ raise exception.NotAuthorized()
os.makedirs(path)
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index b26906001..c8920b00c 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -61,6 +61,7 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
+flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
def render_xml(request, value):
@@ -438,6 +439,7 @@ def get_application():
# Disabled because of lack of proper introspection in Twisted
# or possibly different versions of twisted?
# pylint: disable-msg=E1101
- objectStoreService = internet.TCPServer(FLAGS.s3_port, factory)
+ objectStoreService = internet.TCPServer(FLAGS.s3_port, factory,
+ interface=FLAGS.s3_listen_host)
objectStoreService.setServiceParent(application)
return application
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index 51aef7343..7292dbab8 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -48,8 +48,8 @@ class Image(object):
self.image_id = image_id
self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id))
if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound
+ not os.path.isdir(self.path):
+ raise exception.NotFound
@property
def image_path(self):
diff --git a/nova/rpc.py b/nova/rpc.py
index ea36d69f4..86a29574f 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -24,6 +24,7 @@ No fan-out support yet.
import json
import logging
import sys
+import time
import uuid
from carrot import connection as carrot_connection
@@ -37,8 +38,8 @@ from nova import fakerabbit
from nova import flags
from nova import context
-FLAGS = flags.FLAGS
+FLAGS = flags.FLAGS
LOG = logging.getLogger('amqplib')
LOG.setLevel(logging.DEBUG)
@@ -82,8 +83,24 @@ class Consumer(messaging.Consumer):
Contains methods for connecting the fetch method to async loops
"""
def __init__(self, *args, **kwargs):
- self.failed_connection = False
- super(Consumer, self).__init__(*args, **kwargs)
+ for i in xrange(FLAGS.rabbit_max_retries):
+ if i > 0:
+ time.sleep(FLAGS.rabbit_retry_interval)
+ try:
+ super(Consumer, self).__init__(*args, **kwargs)
+ self.failed_connection = False
+ break
+ except: # Catching all because carrot sucks
+ logging.exception("AMQP server on %s:%d is unreachable." \
+ " Trying again in %d seconds." % (
+ FLAGS.rabbit_host,
+ FLAGS.rabbit_port,
+ FLAGS.rabbit_retry_interval))
+ self.failed_connection = True
+ if self.failed_connection:
+ logging.exception("Unable to connect to AMQP server" \
+ " after %d tries. Shutting down." % FLAGS.rabbit_max_retries)
+ sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
"""Wraps the parent fetch with some logic for failed connections"""
@@ -91,11 +108,12 @@ class Consumer(messaging.Consumer):
# refactored into some sort of connection manager object
try:
if self.failed_connection:
- # NOTE(vish): conn is defined in the parent class, we can
+ # NOTE(vish): connection is defined in the parent class, we can
# recreate it as long as we create the backend too
# pylint: disable-msg=W0201
- self.conn = Connection.recreate()
- self.backend = self.conn.create_backend()
+ self.connection = Connection.recreate()
+ self.backend = self.connection.create_backend()
+ self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
logging.error("Reconnected to queue")
diff --git a/nova/server.py b/nova/server.py
index cb424caa1..a0ee54681 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -42,6 +42,8 @@ flags.DEFINE_bool('daemonize', False, 'daemonize this process')
# clutter.
flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
flags.DEFINE_string('logfile', None, 'log file to output to')
+flags.DEFINE_string('logdir', None, 'directory to keep log files in '
+ '(will be prepended to $logfile)')
flags.DEFINE_string('pidfile', None, 'pid file to output to')
flags.DEFINE_string('working_directory', './', 'working directory...')
flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run')
@@ -119,6 +121,8 @@ def daemonize(args, name, main):
else:
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
+ if FLAGS.logdir:
+ FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
logfile = logging.FileHandler(FLAGS.logfile)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
diff --git a/nova/service.py b/nova/service.py
index d53d92b65..9454d4049 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -17,7 +17,12 @@
# under the License.
"""
-Generic Node baseclass for all workers that run on hosts
+A service is a very thin wrapper around a Manager object. It exposes the
+manager's public methods to other components of the system via rpc. It will
+report state periodically to the database and is responsible for initiating
+any periodic tasks that need to be executed on a given host.
+
+This module contains Service, a generic baseclass for all workers.
"""
import inspect
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 52b392601..639a2ebe4 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -29,7 +29,7 @@ from nova import flags
from nova import exception as exc
import nova.api.openstack.auth
from nova.image import service
-from nova.image.services import glance
+from nova.image import glance
from nova.tests import fake_flags
from nova.wsgi import Router
@@ -76,7 +76,7 @@ def stub_out_image_service(stubs):
def fake_image_show(meh, id):
return dict(kernelId=1, ramdiskId=1)
- stubs.Set(nova.image.service.LocalImageService, 'show', fake_image_show)
+ stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show)
def stub_out_auth(stubs):
@@ -151,21 +151,19 @@ def stub_out_glance(stubs, initial_fixtures=[]):
self.fixtures = []
fake_parallax_client = FakeParallaxClient(initial_fixtures)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'get_image_index',
+ stubs.Set(nova.image.glance.ParallaxClient, 'get_image_index',
fake_parallax_client.fake_get_image_index)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'get_image_details',
+ stubs.Set(nova.image.glance.ParallaxClient, 'get_image_details',
fake_parallax_client.fake_get_image_details)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'get_image_metadata',
+ stubs.Set(nova.image.glance.ParallaxClient, 'get_image_metadata',
fake_parallax_client.fake_get_image_metadata)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'add_image_metadata',
+ stubs.Set(nova.image.glance.ParallaxClient, 'add_image_metadata',
fake_parallax_client.fake_add_image_metadata)
- stubs.Set(nova.image.services.glance.ParallaxClient,
- 'update_image_metadata',
+ stubs.Set(nova.image.glance.ParallaxClient, 'update_image_metadata',
fake_parallax_client.fake_update_image_metadata)
- stubs.Set(nova.image.services.glance.ParallaxClient,
- 'delete_image_metadata',
+ stubs.Set(nova.image.glance.ParallaxClient, 'delete_image_metadata',
fake_parallax_client.fake_delete_image_metadata)
- stubs.Set(nova.image.services.glance.GlanceImageService, 'delete_all',
+ stubs.Set(nova.image.glance.GlanceImageService, 'delete_all',
fake_parallax_client.fake_delete_all)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 0f3941c29..f610cbf9c 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -28,6 +28,7 @@ import unittest
import stubout
import webob
+from nova import context
from nova import exception
from nova import flags
from nova import utils
@@ -52,12 +53,13 @@ class BaseImageServiceTests(object):
'serverId': None,
'progress': None}
- num_images = len(self.service.index())
+ num_images = len(self.service.index(self.context))
- id = self.service.create(fixture)
+ id = self.service.create(self.context, fixture)
self.assertNotEquals(None, id)
- self.assertEquals(num_images + 1, len(self.service.index()))
+ self.assertEquals(num_images + 1,
+ len(self.service.index(self.context)))
def test_create_and_show_non_existing_image(self):
@@ -68,14 +70,15 @@ class BaseImageServiceTests(object):
'serverId': None,
'progress': None}
- num_images = len(self.service.index())
+ num_images = len(self.service.index(self.context))
- id = self.service.create(fixture)
+ id = self.service.create(self.context, fixture)
self.assertNotEquals(None, id)
self.assertRaises(exception.NotFound,
self.service.show,
+ self.context,
'bad image id')
def test_update(self):
@@ -87,12 +90,12 @@ class BaseImageServiceTests(object):
'serverId': None,
'progress': None}
- id = self.service.create(fixture)
+ id = self.service.create(self.context, fixture)
fixture['status'] = 'in progress'
- self.service.update(id, fixture)
- new_image_data = self.service.show(id)
+ self.service.update(self.context, id, fixture)
+ new_image_data = self.service.show(self.context, id)
self.assertEquals('in progress', new_image_data['status'])
def test_delete(self):
@@ -111,20 +114,20 @@ class BaseImageServiceTests(object):
'serverId': None,
'progress': None}]
- num_images = len(self.service.index())
- self.assertEquals(0, num_images, str(self.service.index()))
+ num_images = len(self.service.index(self.context))
+ self.assertEquals(0, num_images, str(self.service.index(self.context)))
ids = []
for fixture in fixtures:
- new_id = self.service.create(fixture)
+ new_id = self.service.create(self.context, fixture)
ids.append(new_id)
- num_images = len(self.service.index())
- self.assertEquals(2, num_images, str(self.service.index()))
+ num_images = len(self.service.index(self.context))
+ self.assertEquals(2, num_images, str(self.service.index(self.context)))
- self.service.delete(ids[0])
+ self.service.delete(self.context, ids[0])
- num_images = len(self.service.index())
+ num_images = len(self.service.index(self.context))
self.assertEquals(1, num_images)
@@ -135,8 +138,9 @@ class LocalImageServiceTest(unittest.TestCase,
def setUp(self):
self.stubs = stubout.StubOutForTesting()
- service_class = 'nova.image.service.LocalImageService'
+ service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
+ self.context = context.RequestContext(None, None)
def tearDown(self):
self.service.delete_all()
@@ -151,8 +155,9 @@ class GlanceImageServiceTest(unittest.TestCase,
def setUp(self):
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_glance(self.stubs)
- service_class = 'nova.image.services.glance.GlanceImageService'
+ service_class = 'nova.image.glance.GlanceImageService'
self.service = utils.import_object(service_class)
+ self.context = context.RequestContext(None, None)
self.service.delete_all()
def tearDown(self):
@@ -187,7 +192,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
def setUp(self):
self.orig_image_service = FLAGS.image_service
- FLAGS.image_service = 'nova.image.services.glance.GlanceImageService'
+ FLAGS.image_service = 'nova.image.glance.GlanceImageService'
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 2d61d2675..9886a2449 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -91,6 +91,41 @@ class CloudTestCase(test.TrialTestCase):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user.id, name)
+ def test_describe_addresses(self):
+ """Makes sure describe addresses runs without raising an exception"""
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'host': FLAGS.host})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ greenthread.sleep(0.3)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_associate_disassociate_address(self):
+ """Verifies associate runs cleanly without raising an exception"""
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'host': FLAGS.host})
+ self.cloud.allocate_address(self.context)
+ inst = db.instance_create(self.context, {})
+ fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
+ ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+ self.cloud.associate_address(self.context,
+ instance_id=ec2_id,
+ public_ip=address)
+ self.cloud.disassociate_address(self.context,
+ public_ip=address)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ greenthread.sleep(0.3)
+ self.network.deallocate_fixed_ip(self.context, fixed)
+ db.instance_destroy(self.context, inst['id'])
+ db.floating_ip_destroy(self.context, address)
+
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 501162465..7376a11dd 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -24,6 +24,7 @@ flags.DECLARE('volume_driver', 'nova.volume.manager')
FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
FLAGS.connection_type = 'fake'
FLAGS.fake_rabbit = True
+flags.DECLARE('auth_driver', 'nova.auth.manager')
FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
diff --git a/nova/tests/misc_unittest.py b/nova/tests/misc_unittest.py
new file mode 100644
index 000000000..856060afa
--- /dev/null
+++ b/nova/tests/misc_unittest.py
@@ -0,0 +1,48 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import subprocess
+
+from nova import test
+from nova.utils import parse_mailmap, str_dict_replace
+
+
+class ProjectTestCase(test.TrialTestCase):
+ def test_authors_up_to_date(self):
+ if os.path.exists('../.bzr'):
+ log_cmd = subprocess.Popen(["bzr", "log", "-n0"],
+ stdout=subprocess.PIPE)
+ changelog = log_cmd.communicate()[0]
+ mailmap = parse_mailmap('../.mailmap')
+
+ contributors = set()
+ for l in changelog.split('\n'):
+ l = l.strip()
+ if (l.startswith('author:') or l.startswith('committer:')
+ and not l == 'committer: Tarmac'):
+ email = l.split(' ')[-1]
+ contributors.add(str_dict_replace(email, mailmap))
+
+ authors_file = open('../Authors', 'r').read()
+
+ missing = set()
+ for contributor in contributors:
+ if not contributor in authors_file:
+ missing.add(contributor)
+
+ self.assertTrue(len(missing) == 0,
+ '%r not listed in Authors' % missing)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index b7caed4fd..6f4705719 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -41,7 +41,6 @@ class NetworkTestCase(test.TrialTestCase):
# flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_network=True,
- auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
network_size=16,
num_networks=5)
logging.getLogger().setLevel(logging.DEBUG)
@@ -127,6 +126,7 @@ class NetworkTestCase(test.TrialTestCase):
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
release_ip(fix_addr)
+ db.floating_ip_destroy(context.get_admin_context(), float_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py
index 9e3afbf4e..b7c1d2acc 100644
--- a/nova/tests/quota_unittest.py
+++ b/nova/tests/quota_unittest.py
@@ -138,11 +138,8 @@ class QuotaTestCase(test.TrialTestCase):
def test_too_many_addresses(self):
address = '192.168.0.100'
- try:
- db.floating_ip_get_by_address(context.get_admin_context(), address)
- except exception.NotFound:
- db.floating_ip_create(context.get_admin_context(),
- {'address': address, 'host': FLAGS.host})
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address, 'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.project.id)
# NOTE(vish): This assert never fails. When cloud attempts to
@@ -151,3 +148,4 @@ class QuotaTestCase(test.TrialTestCase):
# that is breaking.
self.assertRaises(cloud.QuotaError, self.cloud.allocate_address,
self.context)
+ db.floating_ip_destroy(context.get_admin_context(), address)
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index 7ff9b58d7..12321a96f 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -16,7 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Tests for Volume Code
+Tests for Volume Code.
+
"""
import logging
@@ -33,7 +34,8 @@ FLAGS = flags.FLAGS
class VolumeTestCase(test.TrialTestCase):
- """Test Case for volumes"""
+ """Test Case for volumes."""
+
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
@@ -44,7 +46,7 @@ class VolumeTestCase(test.TrialTestCase):
@staticmethod
def _create_volume(size='0'):
- """Create a volume object"""
+ """Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
@@ -56,7 +58,7 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_create_delete_volume(self):
- """Test volume can be created and deleted"""
+ """Test volume can be created and deleted."""
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)
self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
@@ -70,7 +72,7 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_too_big_volume(self):
- """Ensure failure if a too large of a volume is requested"""
+ """Ensure failure if a too large of a volume is requested."""
# FIXME(vish): validation needs to move into the data layer in
# volume_create
defer.returnValue(True)
@@ -83,7 +85,7 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_too_many_volumes(self):
- """Ensure that NoMoreTargets is raised when we run out of volumes"""
+ """Ensure that NoMoreTargets is raised when we run out of volumes."""
vols = []
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
@@ -100,7 +102,7 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_run_attach_detach_volume(self):
- """Make sure volume can be attached and detached from instance"""
+ """Make sure volume can be attached and detached from instance."""
inst = {}
inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
@@ -149,12 +151,12 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_concurrent_volumes_get_different_targets(self):
- """Ensure multiple concurrent volumes get different targets"""
+ """Ensure multiple concurrent volumes get different targets."""
volume_ids = []
targets = []
def _check(volume_id):
- """Make sure targets aren't duplicated"""
+ """Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
iscsi_target = db.volume_get_iscsi_target_num(admin_context,
diff --git a/nova/twistd.py b/nova/twistd.py
index 3ec0ff61e..cb5648ce6 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -43,6 +43,8 @@ else:
FLAGS = flags.FLAGS
+flags.DEFINE_string('logdir', None, 'directory to keep log files in '
+ '(will be prepended to $logfile)')
class TwistdServerOptions(ServerOptions):
@@ -246,6 +248,8 @@ def serve(filename):
FLAGS.logfile = '%s.log' % name
elif FLAGS.logfile.endswith('twistd.log'):
FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
+ if FLAGS.logdir:
+ FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
if not FLAGS.prefix:
FLAGS.prefix = name
elif FLAGS.prefix.endswith('twisted'):
diff --git a/nova/utils.py b/nova/utils.py
index e7892a212..142584df8 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -21,6 +21,7 @@ System-level utilities and helper functions.
"""
import datetime
+import functools
import inspect
import logging
import os
@@ -174,6 +175,24 @@ def parse_isotime(timestr):
return datetime.datetime.strptime(timestr, TIME_FORMAT)
+def parse_mailmap(mailmap='.mailmap'):
+ mapping = {}
+ if os.path.exists(mailmap):
+ fp = open(mailmap, 'r')
+ for l in fp:
+ l = l.strip()
+ if not l.startswith('#') and ' ' in l:
+ canonical_email, alias = l.split(' ')
+ mapping[alias] = canonical_email
+ return mapping
+
+
+def str_dict_replace(s, mapping):
+ for s1, s2 in mapping.iteritems():
+ s = s.replace(s1, s2)
+ return s
+
+
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 34e37adf7..11f0fa8ce 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Abstraction of the underlying virtualization API"""
+"""Abstraction of the underlying virtualization API."""
import logging
import sys
@@ -32,13 +32,26 @@ FLAGS = flags.FLAGS
def get_connection(read_only=False):
- """Returns an object representing the connection to a virtualization
- platform. This could be nova.virt.fake.FakeConnection in test mode,
- a connection to KVM or QEMU via libvirt, or a connection to XenServer
- or Xen Cloud Platform via XenAPI.
+ """
+ Returns an object representing the connection to a virtualization
+ platform.
+
+ This could be :mod:`nova.virt.fake.FakeConnection` in test mode,
+ a connection to KVM, QEMU, or UML via :mod:`libvirt_conn`, or a connection
+ to XenServer or Xen Cloud Platform via :mod:`xenapi`.
Any object returned here must conform to the interface documented by
- FakeConnection.
+ :mod:`FakeConnection`.
+
+ **Related flags**
+
+    :connection_type: A string literal that falls through an if/elif structure
+ to determine what virtualization mechanism to use.
+ Values may be
+
+ * fake
+ * libvirt
+ * xenapi
"""
# TODO(termie): maybe lazy load after initial check for permissions
# TODO(termie): check whether we can be disconnected
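
Because the test flags already set connection_type to 'fake', the entry point
documented above can be exercised without a hypervisor. A minimal usage
sketch, setting the flag directly the same way nova/tests/fake_flags.py does:

    from nova import flags
    from nova.virt import connection

    FLAGS = flags.FLAGS
    FLAGS.connection_type = 'fake'     # same trick as nova/tests/fake_flags.py

    conn = connection.get_connection(read_only=True)
    print conn                         # a nova.virt.fake.FakeConnection
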
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 66eff4c66..f855523d3 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -18,8 +18,11 @@
# under the License.
"""
-A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor.
-This module also documents the semantics of real hypervisor connections.
+A fake (in-memory) hypervisor+api.
+
+Allows nova testing w/o a hypervisor. This module also documents the
+semantics of real hypervisor connections.
+
"""
from twisted.internet import defer
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 4f7fd72f0..18085089f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -18,7 +18,27 @@
# under the License.
"""
-A connection to a hypervisor (e.g. KVM) through libvirt.
+A connection to a hypervisor through libvirt.
+
+Supports KVM, QEMU, UML, and XEN.
+
+**Related Flags**
+
+:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
+ (default: kvm).
+:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
+:libvirt_xml_template: Libvirt XML Template (QEmu/KVM).
+:libvirt_xen_xml_template: Libvirt XML Template (Xen).
+:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux).
+:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU).
+:libvirt_rescue_xen_xml_template: XML template for rescue mode (Xen).
+:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML).
+:rescue_image_id: Rescue ami image (default: ami-rescue).
+:rescue_kernel_id: Rescue aki image (default: aki-rescue).
+:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
+:injected_network_template: Template file for injected network.
+:allow_project_net_traffic: Whether to allow in-project network traffic.
+
"""
import logging
@@ -400,7 +420,7 @@ class LibvirtConnection(object):
@defer.inlineCallbacks
def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
# syntactic nicety
- basepath = lambda fname='', prefix=prefix: os.path.join(
+ basepath = lambda fname = '', prefix = prefix: os.path.join(
FLAGS.instances_path,
inst['name'],
prefix + fname)
@@ -437,7 +457,7 @@ class LibvirtConnection(object):
yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
project)
- execute = lambda cmd, process_input=None, check_exit_code=True: \
+ execute = lambda cmd, process_input = None, check_exit_code = True: \
process.simple_execute(cmd=cmd,
process_input=process_input,
check_exit_code=check_exit_code)
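
The template flags listed in the docstring pair with libvirt_type roughly as
sketched below; this is only an illustration of the flag naming, not a copy
of the selection code in LibvirtConnection:

    # Hypothetical helper showing which template flag goes with which
    # libvirt_type; rescue mode has its own set of templates.
    def template_flag_for(libvirt_type, rescue=False):
        if libvirt_type == 'xen':
            name = 'libvirt_xen_xml_template'
        elif libvirt_type == 'uml':
            name = 'libvirt_uml_xml_template'
        else:                          # kvm and qemu share one template
            name = 'libvirt_xml_template'
        if rescue:
            name = name.replace('libvirt_', 'libvirt_rescue_')
        return name

    print template_flag_for('uml', rescue=True)  # libvirt_rescue_uml_xml_template
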
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
index a17e405ab..3169562a5 100644
--- a/nova/virt/xenapi.py
+++ b/nova/virt/xenapi.py
@@ -33,6 +33,18 @@ long-running operations.
FIXME: get_info currently doesn't conform to these rules, and will block the
reactor thread if the VM.get_by_name_label or VM.get_record calls block.
+
+**Related Flags**
+
+:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
+:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
+ Platform (default: root).
+:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
+ Platform.
+:xenapi_task_poll_interval: The interval (seconds) used for polling of
+ remote tasks (Async.VM.start, etc)
+ (default: 0.5).
+
"""
import logging
@@ -274,11 +286,21 @@ class XenAPIConnection(object):
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
defer.returnValue(None)
+ # Get the VDIs related to the VM
+ vdis = yield self._lookup_vm_vdis(vm)
try:
task = yield self._call_xenapi('Async.VM.hard_shutdown', vm)
yield self._wait_for_task(task)
except Exception, exc:
logging.warn(exc)
+ # Disk clean-up
+ if vdis:
+ for vdi in vdis:
+ try:
+ task = yield self._call_xenapi('Async.VDI.destroy', vdi)
+ yield self._wait_for_task(task)
+ except Exception, exc:
+ logging.warn(exc)
try:
task = yield self._call_xenapi('Async.VM.destroy', vm)
yield self._wait_for_task(task)
@@ -313,6 +335,30 @@ class XenAPIConnection(object):
else:
return vms[0]
+ @utils.deferredToThread
+ def _lookup_vm_vdis(self, vm):
+ return self._lookup_vm_vdis_blocking(vm)
+
+ def _lookup_vm_vdis_blocking(self, vm):
+ # Firstly we get the VBDs, then the VDIs.
+ # TODO: do we leave the read-only devices?
+ vbds = self._conn.xenapi.VM.get_VBDs(vm)
+ vdis = []
+ if vbds:
+ for vbd in vbds:
+ try:
+ vdi = self._conn.xenapi.VBD.get_VDI(vbd)
+ # Test valid VDI
+ record = self._conn.xenapi.VDI.get_record(vdi)
+ except Exception, exc:
+ logging.warn(exc)
+ else:
+ vdis.append(vdi)
+ if len(vdis) > 0:
+ return vdis
+ else:
+ return None
+
def _wait_for_task(self, task):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes."""
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 6b0510704..156aad2a0 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -15,9 +15,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
-Drivers for volumes
+Drivers for volumes.
+
"""
import logging
@@ -54,7 +54,7 @@ flags.DEFINE_string('iscsi_ip_prefix', '127.0',
class VolumeDriver(object):
- """Executes commands relating to Volumes"""
+ """Executes commands relating to Volumes."""
def __init__(self, execute=process.simple_execute,
sync_exec=utils.execute, *args, **kwargs):
# NOTE(vish): db is set by Manager
@@ -88,7 +88,7 @@ class VolumeDriver(object):
@defer.inlineCallbacks
def create_volume(self, volume):
- """Creates a logical volume"""
+ """Creates a logical volume."""
if int(volume['size']) == 0:
sizestr = '100M'
else:
@@ -100,7 +100,7 @@ class VolumeDriver(object):
@defer.inlineCallbacks
def delete_volume(self, volume):
- """Deletes a logical volume"""
+ """Deletes a logical volume."""
yield self._try_execute("sudo lvremove -f %s/%s" %
(FLAGS.volume_group,
volume['name']))
@@ -114,39 +114,39 @@ class VolumeDriver(object):
escaped_name))
def ensure_export(self, context, volume):
- """Safely and synchronously recreates an export for a logical volume"""
+ """Synchronously recreates an export for a logical volume."""
raise NotImplementedError()
@defer.inlineCallbacks
def create_export(self, context, volume):
- """Exports the volume"""
+ """Exports the volume."""
raise NotImplementedError()
@defer.inlineCallbacks
def remove_export(self, context, volume):
- """Removes an export for a logical volume"""
+ """Removes an export for a logical volume."""
raise NotImplementedError()
@defer.inlineCallbacks
def discover_volume(self, volume):
- """Discover volume on a remote host"""
+ """Discover volume on a remote host."""
raise NotImplementedError()
@defer.inlineCallbacks
def undiscover_volume(self, volume):
- """Undiscover volume on a remote host"""
+ """Undiscover volume on a remote host."""
raise NotImplementedError()
class AOEDriver(VolumeDriver):
- """Implements AOE specific volume commands"""
+ """Implements AOE specific volume commands."""
def ensure_export(self, context, volume):
# NOTE(vish): we depend on vblade-persist for recreating exports
pass
def _ensure_blades(self, context):
- """Ensure that blades have been created in datastore"""
+ """Ensure that blades have been created in datastore."""
total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf
if self.db.export_device_count(context) >= total_blades:
return
@@ -157,7 +157,7 @@ class AOEDriver(VolumeDriver):
@defer.inlineCallbacks
def create_export(self, context, volume):
- """Creates an export for a logical volume"""
+ """Creates an export for a logical volume."""
self._ensure_blades(context)
(shelf_id,
blade_id) = self.db.volume_allocate_shelf_and_blade(context,
@@ -184,7 +184,7 @@ class AOEDriver(VolumeDriver):
@defer.inlineCallbacks
def remove_export(self, context, volume):
- """Removes an export for a logical volume"""
+ """Removes an export for a logical volume."""
(shelf_id,
blade_id) = self.db.volume_get_shelf_and_blade(context,
volume['id'])
@@ -195,39 +195,40 @@ class AOEDriver(VolumeDriver):
@defer.inlineCallbacks
def discover_volume(self, _volume):
- """Discover volume on a remote host"""
+ """Discover volume on a remote host."""
yield self._execute("sudo aoe-discover")
yield self._execute("sudo aoe-stat", check_exit_code=False)
@defer.inlineCallbacks
def undiscover_volume(self, _volume):
- """Undiscover volume on a remote host"""
+ """Undiscover volume on a remote host."""
yield
class FakeAOEDriver(AOEDriver):
- """Logs calls instead of executing"""
+ """Logs calls instead of executing."""
+
def __init__(self, *args, **kwargs):
super(FakeAOEDriver, self).__init__(execute=self.fake_execute,
sync_exec=self.fake_execute,
*args, **kwargs)
def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
+ """No setup necessary in fake mode."""
pass
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
- """Execute that simply logs the command"""
+ """Execute that simply logs the command."""
logging.debug("FAKE AOE: %s", cmd)
return (None, None)
class ISCSIDriver(VolumeDriver):
- """Executes commands relating to ISCSI volumes"""
+ """Executes commands relating to ISCSI volumes."""
def ensure_export(self, context, volume):
- """Safely and synchronously recreates an export for a logical volume"""
+ """Synchronously recreates an export for a logical volume."""
iscsi_target = self.db.volume_get_iscsi_target_num(context,
volume['id'])
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
@@ -242,7 +243,7 @@ class ISCSIDriver(VolumeDriver):
check_exit_code=False)
def _ensure_iscsi_targets(self, context, host):
- """Ensure that target ids have been created in datastore"""
+ """Ensure that target ids have been created in datastore."""
host_iscsi_targets = self.db.iscsi_target_count_by_host(context, host)
if host_iscsi_targets >= FLAGS.iscsi_num_targets:
return
@@ -253,7 +254,7 @@ class ISCSIDriver(VolumeDriver):
@defer.inlineCallbacks
def create_export(self, context, volume):
- """Creates an export for a logical volume"""
+ """Creates an export for a logical volume."""
self._ensure_iscsi_targets(context, volume['host'])
iscsi_target = self.db.volume_allocate_iscsi_target(context,
volume['id'],
@@ -269,7 +270,7 @@ class ISCSIDriver(VolumeDriver):
@defer.inlineCallbacks
def remove_export(self, context, volume):
- """Removes an export for a logical volume"""
+ """Removes an export for a logical volume."""
iscsi_target = self.db.volume_get_iscsi_target_num(context,
volume['id'])
yield self._execute("sudo ietadm --op delete --tid=%s "
@@ -279,6 +280,7 @@ class ISCSIDriver(VolumeDriver):
@defer.inlineCallbacks
def _get_name_and_portal(self, volume_name, host):
+ """Gets iscsi name and portal from volume name and host."""
(out, _err) = yield self._execute("sudo iscsiadm -m discovery -t "
"sendtargets -p %s" % host)
for target in out.splitlines():
@@ -290,7 +292,7 @@ class ISCSIDriver(VolumeDriver):
@defer.inlineCallbacks
def discover_volume(self, volume):
- """Discover volume on a remote host"""
+ """Discover volume on a remote host."""
(iscsi_name,
iscsi_portal) = yield self._get_name_and_portal(volume['name'],
volume['host'])
@@ -303,7 +305,7 @@ class ISCSIDriver(VolumeDriver):
@defer.inlineCallbacks
def undiscover_volume(self, volume):
- """Undiscover volume on a remote host"""
+ """Undiscover volume on a remote host."""
(iscsi_name,
iscsi_portal) = yield self._get_name_and_portal(volume['name'],
volume['host'])
@@ -317,18 +319,18 @@ class ISCSIDriver(VolumeDriver):
class FakeISCSIDriver(ISCSIDriver):
- """Logs calls instead of executing"""
+ """Logs calls instead of executing."""
def __init__(self, *args, **kwargs):
super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
sync_exec=self.fake_execute,
*args, **kwargs)
def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
+ """No setup necessary in fake mode."""
pass
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
- """Execute that simply logs the command"""
+ """Execute that simply logs the command."""
logging.debug("FAKE ISCSI: %s", cmd)
return (None, None)
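Both fake drivers above work the same way: they inject a logging stub in place of the real executor, so the command-building logic can run on a box without AoE or iSCSI tooling. A rough sketch of that pattern in isolation follows; the class names and the lvcreate command are stand-ins, and only the logging behaviour and the (None, None) return shape come from the code above.

import logging


class ShellVolumeDriver(object):
    """Builds shell commands and hands them to an injected executor."""

    def __init__(self, execute, *args, **kwargs):
        self._execute = execute

    def create_volume(self, volume):
        # Stand-in command; the real drivers build lvcreate/vblade/ietadm
        # command lines in the same way.
        self._execute("sudo lvcreate -L 100M -n %s nova-volumes"
                      % volume['name'])


class FakeShellVolumeDriver(ShellVolumeDriver):
    """Same command construction, but every command is only logged."""

    def __init__(self, *args, **kwargs):
        super(FakeShellVolumeDriver, self).__init__(
            execute=self.fake_execute, *args, **kwargs)

    @staticmethod
    def fake_execute(cmd, *_args, **_kwargs):
        logging.debug("FAKE SHELL: %s", cmd)
        return (None, None)  # same (stdout, stderr) shape as a real executor


# FakeShellVolumeDriver().create_volume({'name': 'volume-00000001'}) logs the
# command instead of running it, which is all the unit tests need.
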
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index ee1c019ad..589e7d7d9 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -15,10 +15,31 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
"""
-Volume manager manages creating, attaching, detaching, and
-destroying persistent storage volumes, ala EBS.
+Volume manager manages creating, attaching, detaching, and destroying
+persistent storage volumes.
+
+Persistent storage volumes keep their state independent of instances. You can
+attach a volume to an instance, terminate the instance, spawn a new instance
+(even one from a different image) and re-attach the volume with the same data
+intact.
+
+**Related Flags**
+
+:volume_topic: What :mod:`rpc` topic to listen to (default: `volume`).
+:volume_manager: The module name of a class derived from
+ :class:`manager.Manager` (default:
+ :class:`nova.volume.manager.AOEManager`).
+:storage_availability_zone: Defaults to `nova`.
+:volume_driver: Used by :class:`AOEManager`. Defaults to
+ :class:`nova.volume.driver.AOEDriver`.
+:num_shelves: Number of shelves for AoE (default: 100).
+:blades_per_shelf: Number of vblades per shelf to allocate AoE storage from
+ (default: 16).
+:volume_group: Name of the group that will contain exported volumes (default:
+ `nova-volumes`).
+:aoe_eth_dev: Device name the volumes will be exported on (default: `eth0`).
+:num_shell_tries: Number of times to attempt to run AoE commands (default: 3).
+
"""
import logging
@@ -44,8 +65,9 @@ flags.DEFINE_boolean('use_local_volumes', True,
class VolumeManager(manager.Manager):
- """Manages attachable block storage devices"""
+ """Manages attachable block storage devices."""
def __init__(self, volume_driver=None, *args, **kwargs):
+        """Load the driver specified in args, or the one named in flags."""
if not volume_driver:
volume_driver = FLAGS.volume_driver
self.driver = utils.import_object(volume_driver)
@@ -56,8 +78,7 @@ class VolumeManager(manager.Manager):
def init_host(self):
"""Do any initialization that needs to be run if this is a
- standalone service.
- """
+ standalone service."""
self.driver.check_for_setup_error()
ctxt = context.get_admin_context()
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
@@ -67,7 +88,7 @@ class VolumeManager(manager.Manager):
@defer.inlineCallbacks
def create_volume(self, context, volume_id):
- """Creates and exports the volume"""
+ """Creates and exports the volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
logging.info("volume %s: creating", volume_ref['name'])
@@ -95,7 +116,7 @@ class VolumeManager(manager.Manager):
@defer.inlineCallbacks
def delete_volume(self, context, volume_id):
- """Deletes and unexports volume"""
+ """Deletes and unexports volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['attach_status'] == "attached":
@@ -112,10 +133,9 @@ class VolumeManager(manager.Manager):
@defer.inlineCallbacks
def setup_compute_volume(self, context, volume_id):
- """Setup remote volume on compute host
+        """Set up remote volume on compute host.
- Returns path to device.
- """
+ Returns path to device."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
@@ -126,7 +146,7 @@ class VolumeManager(manager.Manager):
@defer.inlineCallbacks
def remove_compute_volume(self, context, volume_id):
- """Remove remote volume on compute host """
+ """Remove remote volume on compute host."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
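VolumeManager.__init__ above defers to utils.import_object(FLAGS.volume_driver), so the concrete driver (AOE, iSCSI, or one of the fakes) is chosen entirely by the volume_driver flag documented in the new module docstring. The sketch below shows the dotted-path loading that such a helper performs; the helper bodies here are assumptions and nova's own utils functions may differ in error handling.

import sys


def import_class(import_str):
    """Return a class given a dotted path like 'pkg.module.ClassName'."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    return getattr(sys.modules[mod_str], class_str)


def import_object(import_str):
    """Instantiate the class named by the dotted path."""
    return import_class(import_str)()


# With --volume_driver=nova.volume.driver.FakeISCSIDriver the manager would
# end up holding a FakeISCSIDriver instance, roughly:
#   driver = import_object(FLAGS.volume_driver)
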
diff --git a/nova/wsgi.py b/nova/wsgi.py
index b04b487ea..c7ee9ed14 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -28,7 +28,7 @@ from xml.dom import minidom
import eventlet
import eventlet.wsgi
-eventlet.patcher.monkey_patch(all=False, socket=True)
+eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
import routes
import routes.middleware
import webob
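
The wsgi change extends the monkey patching so the time module is green as well. A small standalone sketch, not tied to nova, of what that buys: once time is patched, time.sleep() yields to the eventlet hub instead of blocking every green thread in the process.

import eventlet
from eventlet import patcher

patcher.monkey_patch(all=False, socket=True, time=True)

import time


def worker(name, delay):
    # With time=True this sleep is cooperative: other green threads run
    # while this one waits.
    time.sleep(delay)
    print('%s finished after %.1fs' % (name, delay))


threads = [eventlet.spawn(worker, 'slow', 0.2),
           eventlet.spawn(worker, 'fast', 0.1)]
for thread in threads:
    thread.wait()
# 'fast' prints before 'slow'; without the patched time module the first
# sleep would block the whole process and 'slow' would finish first.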