author    Anthony Young <sleepsonthefloor@gmail.com>  2010-12-28 18:11:02 -0800
committer Anthony Young <sleepsonthefloor@gmail.com>  2010-12-28 18:11:02 -0800
commit    4b5bd4b4a8db70c40180c745f9bfc33aa6572e09 (patch)
tree      babb1bb8d1d3cf58bb74ce69813c6fcf46ec9751
parent    35638077a186f9315ac6e30cdbe096730a540ed8 (diff)
parent    71be236ef4a1fd956f7462ff236ff53d93fef2dc (diff)
download  nova-4b5bd4b4a8db70c40180c745f9bfc33aa6572e09.tar.gz
          nova-4b5bd4b4a8db70c40180c745f9bfc33aa6572e09.tar.xz
          nova-4b5bd4b4a8db70c40180c745f9bfc33aa6572e09.zip
merge trunk
-rw-r--r--  .mailmap | 2
-rw-r--r--  Authors | 5
-rwxr-xr-x  bin/nova-combined | 3
-rwxr-xr-x  bin/nova-dhcpbridge | 1
-rwxr-xr-x  contrib/puppet/files/production/nova-iptables | 2
-rw-r--r--  nova/api/__init__.py | 1
-rw-r--r--  nova/api/openstack/__init__.py | 126
-rw-r--r--  nova/api/openstack/auth.py | 59
-rw-r--r--  nova/api/openstack/common.py | 36
-rw-r--r--  nova/api/openstack/flavors.py | 3
-rw-r--r--  nova/api/openstack/images.py | 86
-rw-r--r--  nova/api/openstack/ratelimiting/__init__.py | 96
-rw-r--r--  nova/api/openstack/servers.py | 32
-rw-r--r--  nova/api/openstack/sharedipgroups.py | 20
-rw-r--r--  nova/auth/fakeldap.py | 3
-rw-r--r--  nova/auth/ldapdriver.py | 161
-rw-r--r--  nova/auth/nova_openldap.schema | 46
-rw-r--r--  nova/auth/nova_sun.schema | 13
-rwxr-xr-x  nova/auth/opendj.sh | 1
-rwxr-xr-x  nova/auth/slap.sh | 3
-rwxr-xr-x  nova/cloudpipe/bootscript.template | 1
-rw-r--r--  nova/compute/api.py | 29
-rw-r--r--  nova/compute/manager.py | 33
-rw-r--r--  nova/compute/power_state.py | 4
-rw-r--r--  nova/crypto.py | 3
-rw-r--r--  nova/db/sqlalchemy/api.py | 3
-rw-r--r--  nova/test.py | 3
-rw-r--r--  nova/tests/api/__init__.py | 81
-rw-r--r--  nova/tests/api/openstack/__init__.py | 13
-rw-r--r--  nova/tests/api/openstack/fakes.py | 22
-rw-r--r--  nova/tests/api/openstack/test_auth.py | 4
-rw-r--r--  nova/tests/api/openstack/test_images.py | 18
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 32
-rw-r--r--  nova/tests/api/test.py | 81
-rw-r--r--  nova/tests/api_integration.py | 54
-rw-r--r--  nova/tests/db/__init__.py | 20
-rw-r--r--  nova/tests/db/fakes.py | 75
-rw-r--r--  nova/tests/test_access.py (renamed from nova/tests/access_unittest.py) | 0
-rw-r--r--  nova/tests/test_api.py (renamed from nova/tests/api_unittest.py) | 0
-rw-r--r--  nova/tests/test_auth.py (renamed from nova/tests/auth_unittest.py) | 0
-rw-r--r--  nova/tests/test_cloud.py (renamed from nova/tests/cloud_unittest.py) | 0
-rw-r--r--  nova/tests/test_compute.py (renamed from nova/tests/compute_unittest.py) | 12
-rw-r--r--  nova/tests/test_flags.py (renamed from nova/tests/flags_unittest.py) | 0
-rw-r--r--  nova/tests/test_middleware.py (renamed from nova/tests/middleware_unittest.py) | 0
-rw-r--r--  nova/tests/test_misc.py (renamed from nova/tests/misc_unittest.py) | 8
-rw-r--r--  nova/tests/test_network.py (renamed from nova/tests/network_unittest.py) | 0
-rw-r--r--  nova/tests/test_quota.py (renamed from nova/tests/quota_unittest.py) | 0
-rw-r--r--  nova/tests/test_rpc.py (renamed from nova/tests/rpc_unittest.py) | 0
-rw-r--r--  nova/tests/test_scheduler.py (renamed from nova/tests/scheduler_unittest.py) | 2
-rw-r--r--  nova/tests/test_service.py (renamed from nova/tests/service_unittest.py) | 14
-rw-r--r--  nova/tests/test_twistd.py (renamed from nova/tests/twistd_unittest.py) | 0
-rw-r--r--  nova/tests/test_virt.py (renamed from nova/tests/virt_unittest.py) | 28
-rw-r--r--  nova/tests/test_volume.py (renamed from nova/tests/volume_unittest.py) | 0
-rw-r--r--  nova/tests/test_xenapi.py | 220
-rw-r--r--  nova/tests/xenapi/__init__.py | 20
-rw-r--r--  nova/tests/xenapi/stubs.py | 103
-rw-r--r--  nova/utils.py | 3
-rw-r--r--  nova/virt/fake.py | 12
-rw-r--r--  nova/virt/libvirt_conn.py | 37
-rw-r--r--  nova/virt/xenapi/__init__.py | 15
-rw-r--r--  nova/virt/xenapi/fake.py | 389
-rw-r--r--  nova/virt/xenapi/network_utils.py | 15
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 178
-rw-r--r--  nova/virt/xenapi/vmops.py | 111
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 266
-rw-r--r--  nova/virt/xenapi/volumeops.py | 101
-rw-r--r--  nova/virt/xenapi_conn.py | 105
-rw-r--r--  nova/wsgi.py | 2
-rw-r--r--  plugins/xenserver/doc/networking.rst | 144
-rwxr-xr-x  plugins/xenserver/networking/etc/init.d/host-rules | 106
-rw-r--r--  plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch | 22
-rwxr-xr-x  plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py | 119
-rw-r--r--  plugins/xenserver/xenapi/README (renamed from plugins/xenapi/README) | 0
-rw-r--r--  plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore (renamed from plugins/xenapi/etc/xapi.d/plugins/objectstore) | 41
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py (renamed from plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py) | 0
-rw-r--r--  run_tests.py | 126
-rwxr-xr-x  run_tests.sh | 16
-rw-r--r--  setup.py | 1
-rwxr-xr-x  tools/clean-vlans | 2
-rwxr-xr-x  tools/setup_iptables.sh | 2
80 files changed, 2661 insertions, 734 deletions
diff --git a/.mailmap b/.mailmap
index 9ab7db743..9e7fb1ec0 100644
--- a/.mailmap
+++ b/.mailmap
@@ -27,3 +27,5 @@
<vishvananda@gmail.com> <root@ubuntu>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<rlane@wikimedia.org> <laner@controller>
+<corywright@gmail.com> <cory.wright@rackspace.com>
+<ant@openstack.org> <amesserl@rackspace.com>
diff --git a/Authors b/Authors
index 250b3b2ad..edd0ff77e 100644
--- a/Authors
+++ b/Authors
@@ -1,9 +1,12 @@
Andy Smith <code@term.ie>
Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
+Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
+Cory Wright <corywright@gmail.com>
+David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
@@ -26,6 +29,7 @@ Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
+Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
@@ -34,3 +38,4 @@ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
+
diff --git a/bin/nova-combined b/bin/nova-combined
index c6a04f7e9..53322f1a0 100755
--- a/bin/nova-combined
+++ b/bin/nova-combined
@@ -22,6 +22,7 @@
import eventlet
eventlet.monkey_patch()
+import gettext
import os
import sys
@@ -33,6 +34,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
+gettext.install('nova', unicode=1)
+
from nova import api
from nova import flags
from nova import service
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 81b9b6dd3..828aba3d1 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -110,7 +110,6 @@ def main():
FLAGS.num_networks = 5
path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..',
- '_trial_temp',
'nova.sqlite'))
FLAGS.sql_connection = 'sqlite:///%s' % path
action = argv[1]
diff --git a/contrib/puppet/files/production/nova-iptables b/contrib/puppet/files/production/nova-iptables
index b7b52df87..61e2ca2b9 100755
--- a/contrib/puppet/files/production/nova-iptables
+++ b/contrib/puppet/files/production/nova-iptables
@@ -30,6 +30,8 @@ if [ -f /etc/default/nova-iptables ] ; then
. /etc/default/nova-iptables
fi
+export LC_ALL=C
+
API_PORT=${API_PORT:-"8773"}
if [ ! -n "$IP" ]; then
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
index 803470570..26fed847b 100644
--- a/nova/api/__init__.py
+++ b/nova/api/__init__.py
@@ -24,6 +24,7 @@ Root WSGI middleware for all API controllers.
:ec2api_subdomain: subdomain running the EC2 API (default: ec2)
"""
+import logging
import routes
import webob.dec
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index de95ee548..bebcdc18c 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -45,10 +45,14 @@ from nova.auth import manager
FLAGS = flags.FLAGS
-flags.DEFINE_string('nova_api_auth',
- 'nova.api.openstack.auth.BasicApiAuthManager',
+flags.DEFINE_string('os_api_auth',
+ 'nova.api.openstack.auth.AuthMiddleware',
'The auth mechanism to use for the OpenStack API implemenation')
+flags.DEFINE_string('os_api_ratelimiting',
+ 'nova.api.openstack.ratelimiting.RateLimitingMiddleware',
+ 'Default ratelimiting implementation for the Openstack API')
+
flags.DEFINE_bool('allow_admin_api',
False,
'When True, this API service will accept admin operations.')
@@ -58,7 +62,10 @@ class API(wsgi.Middleware):
"""WSGI entry point for all OpenStack API requests."""
def __init__(self):
- app = AuthMiddleware(RateLimitingMiddleware(APIRouter()))
+ auth_middleware = utils.import_class(FLAGS.os_api_auth)
+ ratelimiting_middleware = \
+ utils.import_class(FLAGS.os_api_ratelimiting)
+ app = auth_middleware(ratelimiting_middleware(APIRouter()))
super(API, self).__init__(app)
@webob.dec.wsgify
@@ -67,101 +74,11 @@ class API(wsgi.Middleware):
return req.get_response(self.application)
except Exception as ex:
logging.warn(_("Caught error: %s") % str(ex))
- logging.debug(traceback.format_exc())
+ logging.error(traceback.format_exc())
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
-class AuthMiddleware(wsgi.Middleware):
- """Authorize the openstack API request or return an HTTP Forbidden."""
-
- def __init__(self, application):
- self.auth_driver = utils.import_class(FLAGS.nova_api_auth)()
- super(AuthMiddleware, self).__init__(application)
-
- @webob.dec.wsgify
- def __call__(self, req):
- if 'X-Auth-Token' not in req.headers:
- return self.auth_driver.authenticate(req)
-
- user = self.auth_driver.authorize_token(req.headers["X-Auth-Token"])
-
- if not user:
- return faults.Fault(webob.exc.HTTPUnauthorized())
-
- req.environ['nova.context'] = context.RequestContext(user, user)
- return self.application
-
-
-class RateLimitingMiddleware(wsgi.Middleware):
- """Rate limit incoming requests according to the OpenStack rate limits."""
-
- def __init__(self, application, service_host=None):
- """Create a rate limiting middleware that wraps the given application.
-
- By default, rate counters are stored in memory. If service_host is
- specified, the middleware instead relies on the ratelimiting.WSGIApp
- at the given host+port to keep rate counters.
- """
- super(RateLimitingMiddleware, self).__init__(application)
- if not service_host:
- #TODO(gundlach): These limits were based on limitations of Cloud
- #Servers. We should revisit them in Nova.
- self.limiter = ratelimiting.Limiter(limits={
- 'DELETE': (100, ratelimiting.PER_MINUTE),
- 'PUT': (10, ratelimiting.PER_MINUTE),
- 'POST': (10, ratelimiting.PER_MINUTE),
- 'POST servers': (50, ratelimiting.PER_DAY),
- 'GET changes-since': (3, ratelimiting.PER_MINUTE),
- })
- else:
- self.limiter = ratelimiting.WSGIAppProxy(service_host)
-
- @webob.dec.wsgify
- def __call__(self, req):
- """Rate limit the request.
-
- If the request should be rate limited, return a 413 status with a
- Retry-After header giving the time when the request would succeed.
- """
- action_name = self.get_action_name(req)
- if not action_name:
- # Not rate limited
- return self.application
- delay = self.get_delay(action_name,
- req.environ['nova.context'].user_id)
- if delay:
- # TODO(gundlach): Get the retry-after format correct.
- exc = webob.exc.HTTPRequestEntityTooLarge(
- explanation=_('Too many requests.'),
- headers={'Retry-After': time.time() + delay})
- raise faults.Fault(exc)
- return self.application
-
- def get_delay(self, action_name, username):
- """Return the delay for the given action and username, or None if
- the action would not be rate limited.
- """
- if action_name == 'POST servers':
- # "POST servers" is a POST, so it counts against "POST" too.
- # Attempt the "POST" first, lest we are rate limited by "POST" but
- # use up a precious "POST servers" call.
- delay = self.limiter.perform("POST", username=username)
- if delay:
- return delay
- return self.limiter.perform(action_name, username=username)
-
- def get_action_name(self, req):
- """Return the action name for this request."""
- if req.method == 'GET' and 'changes-since' in req.GET:
- return 'GET changes-since'
- if req.method == 'POST' and req.path_info.startswith('/servers'):
- return 'POST servers'
- if req.method in ['PUT', 'POST', 'DELETE']:
- return req.method
- return None
-
-
class APIRouter(wsgi.Router):
"""
Routes requests on the OpenStack API to the appropriate controller
@@ -176,6 +93,8 @@ class APIRouter(wsgi.Router):
logging.debug("Including admin operations in API.")
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
+ server_members['suspend'] = 'POST'
+ server_members['resume'] = 'POST'
mapper.resource("server", "servers", controller=servers.Controller(),
collection={'detail': 'GET'},
@@ -194,22 +113,3 @@ class APIRouter(wsgi.Router):
controller=sharedipgroups.Controller())
super(APIRouter, self).__init__(mapper)
-
-
-def limited(items, req):
- """Return a slice of items according to requested offset and limit.
-
- items - a sliceable
- req - wobob.Request possibly containing offset and limit GET variables.
- offset is where to start in the list, and limit is the maximum number
- of items to return.
-
- If limit is not specified, 0, or > 1000, defaults to 1000.
- """
- offset = int(req.GET.get('offset', 0))
- limit = int(req.GET.get('limit', 0))
- if not limit:
- limit = 1000
- limit = min(1000, limit)
- range_end = offset + limit
- return items[offset:range_end]
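[Note: the API entry point above no longer hard-codes its middleware; it resolves the classes named by the os_api_auth and os_api_ratelimiting flags at runtime with utils.import_class and stacks them around the router. A rough, illustrative equivalent of that composition in plain Python (not nova's actual utils implementation):

    import importlib

    def import_class(path):
        # "pkg.module.ClassName" -> the class object
        module_name, sep, class_name = path.rpartition('.')
        return getattr(importlib.import_module(module_name), class_name)

    def build_api(router,
                  auth_path='nova.api.openstack.auth.AuthMiddleware',
                  limit_path='nova.api.openstack.ratelimiting.RateLimitingMiddleware'):
        # auth wraps rate limiting, which wraps the router, as in API.__init__
        auth_cls = import_class(auth_path)
        limit_cls = import_class(limit_path)
        return auth_cls(limit_cls(router))
]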
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index fcda97ab1..e24e58fd3 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -1,3 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.import datetime
+
import datetime
import hashlib
import json
@@ -7,29 +24,45 @@ import webob.exc
import webob.dec
from nova import auth
+from nova import context
from nova import db
from nova import flags
from nova import manager
from nova import utils
+from nova import wsgi
from nova.api.openstack import faults
FLAGS = flags.FLAGS
-class Context(object):
- pass
-
-
-class BasicApiAuthManager(object):
- """ Implements a somewhat rudimentary version of OpenStack Auth"""
+class AuthMiddleware(wsgi.Middleware):
+ """Authorize the openstack API request or return an HTTP Forbidden."""
- def __init__(self, db_driver=None):
+ def __init__(self, application, db_driver=None):
if not db_driver:
db_driver = FLAGS.db_driver
self.db = utils.import_object(db_driver)
self.auth = auth.manager.AuthManager()
- self.context = Context()
- super(BasicApiAuthManager, self).__init__()
+ super(AuthMiddleware, self).__init__(application)
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ if not self.has_authentication(req):
+ return self.authenticate(req)
+
+ user = self.get_user_by_authentication(req)
+
+ if not user:
+ return faults.Fault(webob.exc.HTTPUnauthorized())
+
+ req.environ['nova.context'] = context.RequestContext(user, user)
+ return self.application
+
+ def has_authentication(self, req):
+ return 'X-Auth-Token' in req.headers
+
+ def get_user_by_authentication(self, req):
+ return self.authorize_token(req.headers["X-Auth-Token"])
def authenticate(self, req):
# Unless the request is explicitly made against /<version>/ don't
@@ -68,11 +101,12 @@ class BasicApiAuthManager(object):
This method will also remove the token if the timestamp is older than
2 days ago.
"""
- token = self.db.auth_get_token(self.context, token_hash)
+ ctxt = context.get_admin_context()
+ token = self.db.auth_get_token(ctxt, token_hash)
if token:
delta = datetime.datetime.now() - token.created_at
if delta.days >= 2:
- self.db.auth_destroy_token(self.context, token)
+ self.db.auth_destroy_token(ctxt, token)
else:
return self.auth.get_user(token.user_id)
return None
@@ -84,6 +118,7 @@ class BasicApiAuthManager(object):
key - string API key
req - webob.Request object
"""
+ ctxt = context.get_admin_context()
user = self.auth.get_user_from_access_key(key)
if user and user.name == username:
token_hash = hashlib.sha1('%s%s%f' % (username, key,
@@ -95,6 +130,6 @@ class BasicApiAuthManager(object):
token_dict['server_management_url'] = req.url
token_dict['storage_url'] = ''
token_dict['user_id'] = user.id
- token = self.db.auth_create_token(self.context, token_dict)
+ token = self.db.auth_create_token(ctxt, token_dict)
return token, user
return None, None
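[Note: the relocated AuthMiddleware now performs its token lookups with an admin context instead of a placeholder Context object, and authorize_token() still discards tokens older than two days. A minimal sketch of that expiry rule, assuming a token object with created_at and user_id attributes:

    import datetime

    TOKEN_TTL = datetime.timedelta(days=2)

    def user_for_token(token, now=None):
        # stale tokens are rejected (the real code also deletes them)
        now = now or datetime.datetime.now()
        if now - token.created_at >= TOKEN_TTL:
            return None
        return token.user_id
]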
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
new file mode 100644
index 000000000..ac0572c96
--- /dev/null
+++ b/nova/api/openstack/common.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def limited(items, req):
+ """Return a slice of items according to requested offset and limit.
+
+ items - a sliceable
+ req - wobob.Request possibly containing offset and limit GET variables.
+ offset is where to start in the list, and limit is the maximum number
+ of items to return.
+
+ If limit is not specified, 0, or > 1000, defaults to 1000.
+ """
+
+ offset = int(req.GET.get('offset', 0))
+ limit = int(req.GET.get('limit', 0))
+ if not limit:
+ limit = 1000
+ limit = min(1000, limit)
+ range_end = offset + limit
+ return items[offset:range_end]
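[Note: usage sketch for the relocated common.limited() helper; the offset and limit GET parameters come from the webob request, and limit is capped at 1000:

    import webob

    items = range(2500)
    req = webob.Request.blank('/images?offset=100&limit=20')
    page = limited(items, req)      # items[100:120]
    req = webob.Request.blank('/images?limit=5000')
    page = limited(items, req)      # limit capped, so items[0:1000]
]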
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index f23f74fd1..f620d4107 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -18,6 +18,7 @@
from webob import exc
from nova.api.openstack import faults
+from nova.api.openstack import common
from nova.compute import instance_types
from nova import wsgi
import nova.api.openstack
@@ -39,7 +40,7 @@ class Controller(wsgi.Controller):
def detail(self, req):
"""Return all flavors in detail."""
items = [self.show(req, id)['flavor'] for id in self._all_ids()]
- items = nova.api.openstack.limited(items, req)
+ items = common.limited(items, req)
return dict(flavors=items)
def show(self, req, id):
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 4a0a8e6f1..ba35fbc78 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -22,12 +22,73 @@ from nova import utils
from nova import wsgi
import nova.api.openstack
import nova.image.service
+
+from nova.api.openstack import common
from nova.api.openstack import faults
FLAGS = flags.FLAGS
+def _translate_keys(item):
+ """
+ Maps key names to Rackspace-like attributes for return
+ also pares down attributes to those we want
+ item is a dict
+
+ Note: should be removed when the set of keys expected by the api
+ and the set of keys returned by the image service are equivalent
+
+ """
+ # TODO(tr3buchet): this map is specific to s3 object store,
+ # replace with a list of keys for _filter_keys later
+ mapped_keys = {'status': 'imageState',
+ 'id': 'imageId',
+ 'name': 'imageLocation'}
+
+ mapped_item = {}
+ # TODO(tr3buchet):
+ # this chunk of code works with s3 and the local image service/glance
+ # when we switch to glance/local image service it can be replaced with
+ # a call to _filter_keys, and mapped_keys can be changed to a list
+ try:
+ for k, v in mapped_keys.iteritems():
+ # map s3 fields
+ mapped_item[k] = item[v]
+ except KeyError:
+ # return only the fields api expects
+ mapped_item = _filter_keys(item, mapped_keys.keys())
+
+ return mapped_item
+
+
+def _translate_status(item):
+ """
+ Translates status of image to match current Rackspace api bindings
+ item is a dict
+
+ Note: should be removed when the set of statuses expected by the api
+ and the set of statuses returned by the image service are equivalent
+
+ """
+ status_mapping = {
+ 'pending': 'queued',
+ 'decrypting': 'preparing',
+ 'untarring': 'saving',
+ 'available': 'active'}
+ item['status'] = status_mapping[item['status']]
+ return item
+
+
+def _filter_keys(item, keys):
+ """
+ Filters all model attributes except for keys
+ item is a dict
+
+ """
+ return dict((k, v) for k, v in item.iteritems() if k in keys)
+
+
class Controller(wsgi.Controller):
_serialization_metadata = {
@@ -40,24 +101,25 @@ class Controller(wsgi.Controller):
self._service = utils.import_object(FLAGS.image_service)
def index(self, req):
- """Return all public images in brief."""
- return dict(images=[dict(id=img['id'], name=img['name'])
- for img in self.detail(req)['images']])
+ """Return all public images in brief"""
+ items = self._service.index(req.environ['nova.context'])
+ items = common.limited(items, req)
+ items = [_filter_keys(item, ('id', 'name')) for item in items]
+ return dict(images=items)
def detail(self, req):
- """Return all public images in detail."""
+ """Return all public images in detail"""
try:
- images = self._service.detail(req.environ['nova.context'])
- images = nova.api.openstack.limited(images, req)
+ items = self._service.detail(req.environ['nova.context'])
except NotImplementedError:
- # Emulate detail() using repeated calls to show()
- images = self._service.index(ctxt)
- images = nova.api.openstack.limited(images, req)
- images = [self._service.show(ctxt, i['id']) for i in images]
- return dict(images=images)
+ items = self._service.index(req.environ['nova.context'])
+ items = common.limited(items, req)
+ items = [_translate_keys(item) for item in items]
+ items = [_translate_status(item) for item in items]
+ return dict(images=items)
def show(self, req, id):
- """Return data about the given image id."""
+ """Return data about the given image id"""
return dict(image=self._service.show(req.environ['nova.context'], id))
def delete(self, req, id):
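[Note: for illustration, here is how the helpers above translate a made-up s3-style image record into the fields the OpenStack API expects:

    s3_item = {'imageId': 'ami-1234',
               'imageLocation': 'mybucket/test.img',
               'imageState': 'available'}
    item = _translate_keys(s3_item)
    # {'id': 'ami-1234', 'name': 'mybucket/test.img', 'status': 'available'}
    item = _translate_status(item)
    # item['status'] is now 'active'
]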
diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index 918caf055..91a8b2e55 100644
--- a/nova/api/openstack/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
@@ -1,3 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.import datetime
+
"""Rate limiting of arbitrary actions."""
import httplib
@@ -6,6 +23,8 @@ import urllib
import webob.dec
import webob.exc
+from nova import wsgi
+from nova.api.openstack import faults
# Convenience constants for the limits dictionary passed to Limiter().
PER_SECOND = 1
@@ -14,6 +33,83 @@ PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24
+class RateLimitingMiddleware(wsgi.Middleware):
+ """Rate limit incoming requests according to the OpenStack rate limits."""
+
+ def __init__(self, application, service_host=None):
+ """Create a rate limiting middleware that wraps the given application.
+
+ By default, rate counters are stored in memory. If service_host is
+ specified, the middleware instead relies on the ratelimiting.WSGIApp
+ at the given host+port to keep rate counters.
+ """
+ if not service_host:
+ #TODO(gundlach): These limits were based on limitations of Cloud
+ #Servers. We should revisit them in Nova.
+ self.limiter = Limiter(limits={
+ 'DELETE': (100, PER_MINUTE),
+ 'PUT': (10, PER_MINUTE),
+ 'POST': (10, PER_MINUTE),
+ 'POST servers': (50, PER_DAY),
+ 'GET changes-since': (3, PER_MINUTE),
+ })
+ else:
+ self.limiter = WSGIAppProxy(service_host)
+ super(RateLimitingMiddleware, self).__init__(application)
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ """Rate limit the request.
+
+ If the request should be rate limited, return a 413 status with a
+ Retry-After header giving the time when the request would succeed.
+ """
+ return self.limited_request(req, self.application)
+
+ def limited_request(self, req, application):
+ """Rate limit the request.
+
+ If the request should be rate limited, return a 413 status with a
+ Retry-After header giving the time when the request would succeed.
+ """
+ action_name = self.get_action_name(req)
+ if not action_name:
+ # Not rate limited
+ return application
+ delay = self.get_delay(action_name,
+ req.environ['nova.context'].user_id)
+ if delay:
+ # TODO(gundlach): Get the retry-after format correct.
+ exc = webob.exc.HTTPRequestEntityTooLarge(
+ explanation=('Too many requests.'),
+ headers={'Retry-After': time.time() + delay})
+ raise faults.Fault(exc)
+ return application
+
+ def get_delay(self, action_name, username):
+ """Return the delay for the given action and username, or None if
+ the action would not be rate limited.
+ """
+ if action_name == 'POST servers':
+ # "POST servers" is a POST, so it counts against "POST" too.
+ # Attempt the "POST" first, lest we are rate limited by "POST" but
+ # use up a precious "POST servers" call.
+ delay = self.limiter.perform("POST", username=username)
+ if delay:
+ return delay
+ return self.limiter.perform(action_name, username=username)
+
+ def get_action_name(self, req):
+ """Return the action name for this request."""
+ if req.method == 'GET' and 'changes-since' in req.GET:
+ return 'GET changes-since'
+ if req.method == 'POST' and req.path_info.startswith('/servers'):
+ return 'POST servers'
+ if req.method in ['PUT', 'POST', 'DELETE']:
+ return req.method
+ return None
+
+
class Limiter(object):
"""Class providing rate limiting of arbitrary actions."""
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 45db89cbf..849d0bb93 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -22,6 +22,7 @@ from webob import exc
from nova import exception
from nova import wsgi
+from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager as auth_manager
from nova.compute import api as compute_api
@@ -45,7 +46,8 @@ def _entity_detail(inst):
power_state.NOSTATE: 'build',
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
- power_state.PAUSED: 'suspended',
+ power_state.SUSPENDED: 'suspended',
+ power_state.PAUSED: 'error',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
power_state.CRASHED: 'error'}
@@ -98,7 +100,7 @@ class Controller(wsgi.Controller):
"""
instance_list = self.compute_api.get_instances(
req.environ['nova.context'])
- limited_list = nova.api.openstack.limited(instance_list, req)
+ limited_list = common.limited(instance_list, req)
res = [entity_maker(inst)['server'] for inst in limited_list]
return _entity_list(res)
@@ -181,7 +183,7 @@ class Controller(wsgi.Controller):
self.compute_api.pause(ctxt, id)
except:
readable = traceback.format_exc()
- logging.error("Compute.api::pause %s", readable)
+ logging.error(_("Compute.api::pause %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -192,7 +194,29 @@ class Controller(wsgi.Controller):
self.compute_api.unpause(ctxt, id)
except:
readable = traceback.format_exc()
- logging.error("Compute.api::unpause %s", readable)
+ logging.error(_("Compute.api::unpause %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def suspend(self, req, id):
+ """permit admins to suspend the server"""
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.suspend(context, id)
+ except:
+ readable = traceback.format_exc()
+ logging.error(_("compute.api::suspend %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def resume(self, req, id):
+ """permit admins to resume the server from suspend"""
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.resume(context, id)
+ except:
+ readable = traceback.format_exc()
+ logging.error(_("compute.api::resume %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/sharedipgroups.py
index e805ca9f7..75d02905c 100644
--- a/nova/api/openstack/sharedipgroups.py
+++ b/nova/api/openstack/sharedipgroups.py
@@ -19,4 +19,22 @@ from nova import wsgi
class Controller(wsgi.Controller):
- pass
+ """ The Shared IP Groups Controller for the Openstack API """
+
+ def index(self, req):
+ raise NotImplementedError
+
+ def show(self, req, id):
+ raise NotImplementedError
+
+ def update(self, req, id):
+ raise NotImplementedError
+
+ def delete(self, req, id):
+ raise NotImplementedError
+
+ def detail(self, req):
+ raise NotImplementedError
+
+ def create(self, req):
+ raise NotImplementedError
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 33cd03430..4466051f0 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -150,6 +150,9 @@ def _match(key, value, attrs):
"""Match a given key and value against an attribute list."""
if key not in attrs:
return False
+ # This is a wild card search. Implemented as all or nothing for now.
+ if value == "*":
+ return True
if key != "objectclass":
return value in attrs[key]
# it is an objectclass check, so check subclasses
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index e289ea5a2..7616ff112 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -32,11 +32,16 @@ from nova import flags
FLAGS = flags.FLAGS
+flags.DEFINE_integer('ldap_schema_version', 2,
+ 'Current version of the LDAP schema')
flags.DEFINE_string('ldap_url', 'ldap://localhost',
'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
'DN of admin user')
+flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id')
+flags.DEFINE_string('ldap_user_name_attribute', 'cn',
+ 'Attribute to use as name')
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
'OU for Users')
@@ -73,10 +78,20 @@ class LdapDriver(object):
Defines enter and exit and therefore supports the with/as syntax.
"""
+ project_pattern = '(owner=*)'
+ isadmin_attribute = 'isNovaAdmin'
+ project_attribute = 'owner'
+ project_objectclass = 'groupOfNames'
+
def __init__(self):
"""Imports the LDAP module"""
self.ldap = __import__('ldap')
self.conn = None
+ if FLAGS.ldap_schema_version == 1:
+ LdapDriver.project_pattern = '(objectclass=novaProject)'
+ LdapDriver.isadmin_attribute = 'isAdmin'
+ LdapDriver.project_attribute = 'projectManager'
+ LdapDriver.project_objectclass = 'novaProject'
def __enter__(self):
"""Creates the connection to LDAP"""
@@ -104,13 +119,13 @@ class LdapDriver(object):
"""Retrieve project by id"""
dn = 'cn=%s,%s' % (pid,
FLAGS.ldap_project_subtree)
- attr = self.__find_object(dn, '(objectclass=novaProject)')
+ attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr)
def get_users(self):
"""Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree,
- '(objectclass=novaUser)')
+ '(objectclass=novaUser)')
users = []
for attr in attrs:
user = self.__to_user(attr)
@@ -120,7 +135,7 @@ class LdapDriver(object):
def get_projects(self, uid=None):
"""Retrieve list of projects"""
- pattern = '(objectclass=novaProject)'
+ pattern = LdapDriver.project_pattern
if uid:
pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid))
attrs = self.__find_objects(FLAGS.ldap_project_subtree,
@@ -139,23 +154,25 @@ class LdapDriver(object):
# Malformed entries are useless, replace attributes found.
attr = []
if 'secretKey' in user.keys():
- attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
- [secret_key]))
+ attr.append((self.ldap.MOD_REPLACE, 'secretKey',
+ [secret_key]))
else:
- attr.append((self.ldap.MOD_ADD, 'secretKey', \
- [secret_key]))
+ attr.append((self.ldap.MOD_ADD, 'secretKey',
+ [secret_key]))
if 'accessKey' in user.keys():
- attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
- [access_key]))
+ attr.append((self.ldap.MOD_REPLACE, 'accessKey',
+ [access_key]))
else:
- attr.append((self.ldap.MOD_ADD, 'accessKey', \
- [access_key]))
- if 'isAdmin' in user.keys():
- attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
- [str(is_admin).upper()]))
+ attr.append((self.ldap.MOD_ADD, 'accessKey',
+ [access_key]))
+ if LdapDriver.isadmin_attribute in user.keys():
+ attr.append((self.ldap.MOD_REPLACE,
+ LdapDriver.isadmin_attribute,
+ [str(is_admin).upper()]))
else:
- attr.append((self.ldap.MOD_ADD, 'isAdmin', \
- [str(is_admin).upper()]))
+ attr.append((self.ldap.MOD_ADD,
+ LdapDriver.isadmin_attribute,
+ [str(is_admin).upper()]))
self.conn.modify_s(self.__uid_to_dn(name), attr)
return self.get_user(name)
else:
@@ -168,12 +185,12 @@ class LdapDriver(object):
'inetOrgPerson',
'novaUser']),
('ou', [FLAGS.ldap_user_unit]),
- ('uid', [name]),
+ (FLAGS.ldap_user_id_attribute, [name]),
('sn', [name]),
- ('cn', [name]),
+ (FLAGS.ldap_user_name_attribute, [name]),
('secretKey', [secret_key]),
('accessKey', [access_key]),
- ('isAdmin', [str(is_admin).upper()]),
+ (LdapDriver.isadmin_attribute, [str(is_admin).upper()]),
]
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
@@ -204,10 +221,10 @@ class LdapDriver(object):
if not manager_dn in members:
members.append(manager_dn)
attr = [
- ('objectclass', ['novaProject']),
+ ('objectclass', [LdapDriver.project_objectclass]),
('cn', [name]),
('description', [description]),
- ('projectManager', [manager_dn]),
+ (LdapDriver.project_attribute, [manager_dn]),
('member', members)]
self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
return self.__to_project(dict(attr))
@@ -223,7 +240,8 @@ class LdapDriver(object):
"manager %s doesn't exist")
% manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
- attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn))
+ attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
+ manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
self.conn.modify_s('cn=%s,%s' % (project_id,
@@ -283,10 +301,9 @@ class LdapDriver(object):
return roles
else:
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
- roles = self.__find_objects(project_dn,
- '(&(&(objectclass=groupOfNames)'
- '(!(objectclass=novaProject)))'
- '(member=%s))' % self.__uid_to_dn(uid))
+ query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
+ (LdapDriver.project_pattern, self.__uid_to_dn(uid)))
+ roles = self.__find_objects(project_dn, query)
return [role['cn'][0] for role in roles]
def delete_user(self, uid):
@@ -300,14 +317,15 @@ class LdapDriver(object):
# Retrieve user by name
user = self.__get_ldap_user(uid)
if 'secretKey' in user.keys():
- attr.append((self.ldap.MOD_DELETE, 'secretKey', \
- user['secretKey']))
+ attr.append((self.ldap.MOD_DELETE, 'secretKey',
+ user['secretKey']))
if 'accessKey' in user.keys():
- attr.append((self.ldap.MOD_DELETE, 'accessKey', \
- user['accessKey']))
- if 'isAdmin' in user.keys():
- attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
- user['isAdmin']))
+ attr.append((self.ldap.MOD_DELETE, 'accessKey',
+ user['accessKey']))
+ if LdapDriver.isadmin_attribute in user.keys():
+ attr.append((self.ldap.MOD_DELETE,
+ LdapDriver.isadmin_attribute,
+ user[LdapDriver.isadmin_attribute]))
self.conn.modify_s(self.__uid_to_dn(uid), attr)
else:
# Delete entry
@@ -329,7 +347,8 @@ class LdapDriver(object):
if secret_key:
attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key))
if admin is not None:
- attr.append((self.ldap.MOD_REPLACE, 'isAdmin', str(admin).upper()))
+ attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute,
+ str(admin).upper()))
self.conn.modify_s(self.__uid_to_dn(uid), attr)
def __user_exists(self, uid):
@@ -347,7 +366,7 @@ class LdapDriver(object):
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
attr = self.__find_object(self.__uid_to_dn(uid),
- '(objectclass=novaUser)')
+ '(objectclass=novaUser)')
return attr
def __find_object(self, dn, query=None, scope=None):
@@ -383,19 +402,21 @@ class LdapDriver(object):
def __find_role_dns(self, tree):
"""Find dns of role objects in given tree"""
- return self.__find_dns(tree,
- '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
+ query = ('(&(objectclass=groupOfNames)(!%s))' %
+ LdapDriver.project_pattern)
+ return self.__find_dns(tree, query)
def __find_group_dns_with_member(self, tree, uid):
"""Find dns of group objects in a given tree that contain member"""
- dns = self.__find_dns(tree,
- '(&(objectclass=groupOfNames)(member=%s))' %
- self.__uid_to_dn(uid))
+ query = ('(&(objectclass=groupOfNames)(member=%s))' %
+ self.__uid_to_dn(uid))
+ dns = self.__find_dns(tree, query)
return dns
def __group_exists(self, dn):
"""Check if group exists"""
- return self.__find_object(dn, '(objectclass=groupOfNames)') is not None
+ query = '(objectclass=groupOfNames)'
+ return self.__find_object(dn, query) is not None
@staticmethod
def __role_to_dn(role, project_id=None):
@@ -417,9 +438,9 @@ class LdapDriver(object):
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
- raise exception.NotFound(_("Group can't be created "
- "because user %s doesn't exist")
- % member_uid)
+ raise exception.NotFound("Group can't be created "
+ "because user %s doesn't exist" %
+ member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@@ -434,9 +455,8 @@ class LdapDriver(object):
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be searched in group "
- "because the user doesn't exist")
- % uid)
+ raise exception.NotFound("User %s can't be searched in group "
+ "because the user doesn't exist" % uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
@@ -447,12 +467,11 @@ class LdapDriver(object):
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be added to the group "
- "because the user doesn't exist")
- % uid)
+ raise exception.NotFound("User %s can't be added to the group "
+ "because the user doesn't exist" % uid)
if not self.__group_exists(group_dn):
- raise exception.NotFound(_("The group at dn %s doesn't exist")
- % group_dn)
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ group_dn)
if self.__is_in_group(uid, group_dn):
raise exception.Duplicate(_("User %s is already a member of "
"the group %s") % (uid, group_dn))
@@ -462,18 +481,17 @@ class LdapDriver(object):
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
- raise exception.NotFound(_("The group at dn %s doesn't exist")
- % group_dn)
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ group_dn)
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be removed from the "
- "group because the user doesn't exist")
- % uid)
+ raise exception.NotFound("User %s can't be removed from the "
+ "group because the user doesn't exist" %
+ uid)
if not self.__is_in_group(uid, group_dn):
- raise exception.NotFound(_("User %s is not a member of the group")
- % uid)
+ raise exception.NotFound("User %s is not a member of the group" %
+ uid)
# NOTE(vish): remove user from group and any sub_groups
- sub_dns = self.__find_group_dns_with_member(
- group_dn, uid)
+ sub_dns = self.__find_group_dns_with_member(group_dn, uid)
for sub_dn in sub_dns:
self.__safe_remove_from_group(uid, sub_dn)
@@ -491,9 +509,8 @@ class LdapDriver(object):
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be removed from all "
- "because the user doesn't exist")
- % uid)
+ raise exception.NotFound("User %s can't be removed from all "
+ "because the user doesn't exist" % uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@@ -521,13 +538,13 @@ class LdapDriver(object):
if attr is None:
return None
if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
- and 'isAdmin' in attr.keys()):
+ and LdapDriver.isadmin_attribute in attr.keys()):
return {
- 'id': attr['uid'][0],
- 'name': attr['cn'][0],
+ 'id': attr[FLAGS.ldap_user_id_attribute][0],
+ 'name': attr[FLAGS.ldap_user_name_attribute][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
- 'admin': (attr['isAdmin'][0] == 'TRUE')}
+ 'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')}
else:
return None
@@ -539,7 +556,8 @@ class LdapDriver(object):
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
- 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
+ 'project_manager_id':
+ self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
@@ -549,9 +567,10 @@ class LdapDriver(object):
return dn.split(',')[0].split('=')[1]
@staticmethod
- def __uid_to_dn(dn):
+ def __uid_to_dn(uid):
"""Convert uid to dn"""
- return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
+ return (FLAGS.ldap_user_id_attribute + '=%s,%s'
+ % (uid, FLAGS.ldap_user_subtree))
class FakeLdapDriver(LdapDriver):
diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema
index 4047361de..539a5c42d 100644
--- a/nova/auth/nova_openldap.schema
+++ b/nova/auth/nova_openldap.schema
@@ -1,7 +1,9 @@
#
# Person object for Nova
# inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+# Schema version: 2
+# Authors: Vishvananda Ishaya <vishvananda@gmail.com>
+# Ryan Lane <rlane@wikimedia.org>
#
#
@@ -31,54 +33,18 @@ attributetype (
)
attributetype (
- novaAttrs:3
- NAME 'keyFingerprint'
- DESC 'Fingerprint of private key'
- EQUALITY caseIgnoreMatch
- SUBSTR caseIgnoreSubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
- SINGLE-VALUE
- )
-
-attributetype (
novaAttrs:4
- NAME 'isAdmin'
- DESC 'Is user an administrator?'
+ NAME 'isNovaAdmin'
+ DESC 'Is user an nova administrator?'
EQUALITY booleanMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
SINGLE-VALUE
)
-attributetype (
- novaAttrs:5
- NAME 'projectManager'
- DESC 'Project Managers of a project'
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
- )
-
objectClass (
novaOCs:1
NAME 'novaUser'
DESC 'access and secret keys'
AUXILIARY
- MUST ( uid )
- MAY ( accessKey $ secretKey $ isAdmin )
- )
-
-objectClass (
- novaOCs:2
- NAME 'novaKeyPair'
- DESC 'Key pair for User'
- SUP top
- STRUCTURAL
- MUST ( cn $ sshPublicKey $ keyFingerprint )
- )
-
-objectClass (
- novaOCs:3
- NAME 'novaProject'
- DESC 'Container for project'
- SUP groupOfNames
- STRUCTURAL
- MUST ( cn $ projectManager )
+ MAY ( accessKey $ secretKey $ isNovaAdmin )
)
diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema
index e925e05e4..4a6a78839 100644
--- a/nova/auth/nova_sun.schema
+++ b/nova/auth/nova_sun.schema
@@ -1,16 +1,13 @@
#
# Person object for Nova
# inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
-# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com>
+# Schema version: 2
+# Authors: Vishvananda Ishaya <vishvananda@gmail.com>
+# Ryan Lane <rlane@wikimedia.org>
#
# using internet experimental oid arc as per BP64 3.1
dn: cn=schema
attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE)
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) )
diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh
index 8052c077d..1a280e5a8 100755
--- a/nova/auth/opendj.sh
+++ b/nova/auth/opendj.sh
@@ -32,7 +32,6 @@ abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
schemapath='/var/opendj/instance/config/schema'
cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif
cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif
-chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif
chown opendj:opendj $schemapath/98-nova_sun.ldif
cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh
index 797675d2e..95c61dafd 100755
--- a/nova/auth/slap.sh
+++ b/nova/auth/slap.sh
@@ -22,7 +22,7 @@ apt-get install -y slapd ldap-utils python-ldap
abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema
-cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema
+cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema
mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
@@ -33,7 +33,6 @@ cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/inetorgperson.schema
-include /etc/ldap/schema/openssh-lpk_openldap.schema
include /etc/ldap/schema/nova.schema
pidfile /var/run/slapd/slapd.pid
argsfile /var/run/slapd/slapd.args
diff --git a/nova/cloudpipe/bootscript.template b/nova/cloudpipe/bootscript.template
index 11578c134..94dea3f87 100755
--- a/nova/cloudpipe/bootscript.template
+++ b/nova/cloudpipe/bootscript.template
@@ -19,6 +19,7 @@
# This gets zipped and run on the cloudpipe-managed OpenVPN server
+export LC_ALL=C
export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{print $$1}'`
export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $$1}'`
export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $$1}'`
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 64310f92b..38b99a3e3 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -91,15 +91,16 @@ class ComputeAPI(base.Base):
is_vpn = image_id == FLAGS.vpn_image_id
if not is_vpn:
image = self.image_service.show(context, image_id)
-
- # If kernel_id/ramdisk_id isn't explicitly set in API call
- # we take the defaults from the image's metadata
if kernel_id is None:
kernel_id = image.get('kernelId', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdiskId', None)
-
- # Make sure we have access to kernel and ramdisk
+ #No kernel and ramdisk for raw images
+ if kernel_id == str(FLAGS.null_kernel):
+ kernel_id = None
+ ramdisk_id = None
+ logging.debug("Creating a raw instance")
+ # Make sure we have access to kernel and ramdisk (if not raw)
if kernel_id:
self.image_service.show(context, kernel_id)
if ramdisk_id:
@@ -283,6 +284,24 @@ class ComputeAPI(base.Base):
{"method": "unpause_instance",
"args": {"instance_id": instance['id']}})
+ def suspend(self, context, instance_id):
+ """suspend the instance with instance_id"""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "suspend_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def resume(self, context, instance_id):
+ """resume the instance with instance_id"""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "resume_instance",
+ "args": {"instance_id": instance['id']}})
+
def rescue(self, context, instance_id):
"""Rescue the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 8efe9dc81..5f237eda9 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -297,6 +297,39 @@ class ComputeManager(manager.Manager):
result))
@exception.wrap_exception
+ def suspend_instance(self, context, instance_id):
+ """suspend the instance with instance_id"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug(_('instance %s: suspending'),
+ instance_ref['internal_id'])
+ self.db.instance_set_state(context, instance_id,
+ power_state.NOSTATE,
+ 'suspending')
+ self.driver.suspend(instance_ref,
+ lambda result: self._update_state_callback(self,
+ context,
+ instance_id,
+ result))
+
+ @exception.wrap_exception
+ def resume_instance(self, context, instance_id):
+ """resume the suspended instance with instance_id"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug(_('instance %s: resuming'), instance_ref['internal_id'])
+ self.db.instance_set_state(context, instance_id,
+ power_state.NOSTATE,
+ 'resuming')
+ self.driver.resume(instance_ref,
+ lambda result: self._update_state_callback(self,
+ context,
+ instance_id,
+ result))
+
+ @exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index cefdf2d9e..37039d2ec 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -26,6 +26,7 @@ PAUSED = 0x03
SHUTDOWN = 0x04
SHUTOFF = 0x05
CRASHED = 0x06
+SUSPENDED = 0x07
def name(code):
@@ -36,5 +37,6 @@ def name(code):
PAUSED: 'paused',
SHUTDOWN: 'shutdown',
SHUTOFF: 'shutdown',
- CRASHED: 'crashed'}
+ CRASHED: 'crashed',
+ SUSPENDED: 'suspended'}
return d[code]
diff --git a/nova/crypto.py b/nova/crypto.py
index e4133ac85..b8405552d 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -22,6 +22,7 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
"""
import base64
+import gettext
import hashlib
import logging
import os
@@ -33,6 +34,8 @@ import utils
import M2Crypto
+gettext.install('nova', unicode=1)
+
from nova import context
from nova import db
from nova import flags
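[Note: crypto.py (like bin/nova-combined above) now calls gettext.install before importing the rest of nova, which binds the _() translation function into builtins so later modules can mark user-facing strings. A short Python 2 sketch of the effect:

    import gettext

    gettext.install('nova', unicode=1)     # makes _() a builtin
    print _("Caught error: %s") % 'boom'   # translated if a catalog exists
]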
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 52d0c389d..7e945e4cb 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1175,11 +1175,13 @@ def iscsi_target_create_safe(context, values):
###################
+@require_admin_context
def auth_destroy_token(_context, token):
session = get_session()
session.delete(token)
+@require_admin_context
def auth_get_token(_context, token_hash):
session = get_session()
tk = session.query(models.AuthToken).\
@@ -1190,6 +1192,7 @@ def auth_get_token(_context, token_hash):
return tk
+@require_admin_context
def auth_create_token(_context, token):
tk = models.AuthToken()
tk.update(token)
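[Note: the auth token helpers above are now guarded by require_admin_context, matching the switch to context.get_admin_context() in the OpenStack auth middleware. The decorator itself is not part of this diff; a hedged sketch of the usual pattern it implements:

    def require_admin_context(f):
        # reject non-admin contexts before the DB call runs
        def wrapper(context, *args, **kwargs):
            if not getattr(context, 'is_admin', False):
                raise Exception('admin context required')
            return f(context, *args, **kwargs)
        return wrapper
]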
diff --git a/nova/test.py b/nova/test.py
index 7076f1bf4..db5826c04 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -38,9 +38,12 @@ from nova import fakerabbit
from nova import flags
from nova import rpc
from nova.network import manager as network_manager
+from nova.tests import fake_flags
FLAGS = flags.FLAGS
+flags.DEFINE_bool('flush_db', True,
+ 'Flush the database before running fake tests')
flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py
index 9caa8c9d0..e69de29bb 100644
--- a/nova/tests/api/__init__.py
+++ b/nova/tests/api/__init__.py
@@ -1,81 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test for the root WSGI middleware for all API controllers.
-"""
-
-import unittest
-
-import stubout
-import webob
-import webob.dec
-
-import nova.exception
-from nova import api
-from nova.tests.api.fakes import APIStub
-
-
-class Test(unittest.TestCase):
-
- def setUp(self):
- self.stubs = stubout.StubOutForTesting()
-
- def tearDown(self):
- self.stubs.UnsetAll()
-
- def _request(self, url, subdomain, **kwargs):
- environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain}
- environ_keys.update(kwargs)
- req = webob.Request.blank(url, environ_keys)
- return req.get_response(api.API('ec2'))
-
- def test_openstack(self):
- self.stubs.Set(api.openstack, 'API', APIStub)
- result = self._request('/v1.0/cloud', 'api')
- self.assertEqual(result.body, "/cloud")
-
- def test_ec2(self):
- self.stubs.Set(api.ec2, 'API', APIStub)
- result = self._request('/services/cloud', 'ec2')
- self.assertEqual(result.body, "/cloud")
-
- def test_not_found(self):
- self.stubs.Set(api.ec2, 'API', APIStub)
- self.stubs.Set(api.openstack, 'API', APIStub)
- result = self._request('/test/cloud', 'ec2')
- self.assertNotEqual(result.body, "/cloud")
-
- def test_query_api_versions(self):
- result = self._request('/', 'api')
- self.assertTrue('CURRENT' in result.body)
-
- def test_metadata(self):
- def go(url):
- result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2')
- # Each should get to the ORM layer and fail to find the IP
- self.assertRaises(nova.exception.NotFound, go, '/latest/')
- self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/')
- self.assertRaises(nova.exception.NotFound, go, '/1.0/')
-
- def test_ec2_root(self):
- result = self._request('/', 'ec2')
- self.assertTrue('2007-12-15\n' in result.body)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index 2e357febe..9e183bd0d 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -17,11 +17,16 @@
import unittest
-from nova.api.openstack import limited
-from nova.api.openstack import RateLimitingMiddleware
+from nova import context
+from nova import flags
+from nova.api.openstack.ratelimiting import RateLimitingMiddleware
+from nova.api.openstack.common import limited
from nova.tests.api.fakes import APIStub
+from nova import utils
from webob import Request
+FLAGS = flags.FLAGS
+
class RateLimitingMiddlewareTest(unittest.TestCase):
@@ -46,6 +51,8 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
def exhaust(self, middleware, method, url, username, times):
req = Request.blank(url, dict(REQUEST_METHOD=method),
headers={'X-Auth-User': username})
+ req.environ['nova.context'] = context.RequestContext(username,
+ username)
for i in range(times):
resp = req.get_response(middleware)
self.assertEqual(resp.status_int, 200)
@@ -62,7 +69,7 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
middleware = RateLimitingMiddleware(APIStub())
self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10)
self.exhaust(middleware, 'POST', '/images/4', 'usr2', 10)
- self.assertTrue(set(middleware.limiter._levels) ==
+ self.assertTrue(set(middleware.limiter._levels) == \
set(['usr1:POST', 'usr1:POST servers', 'usr2:POST']))
def test_POST_servers_action_correctly_ratelimited(self):
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 21b8aac1c..79663e43a 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -29,8 +29,11 @@ from nova import exception as exc
from nova import flags
from nova import utils
import nova.api.openstack.auth
-from nova.image import service
+from nova.api.openstack import auth
+from nova.api.openstack import ratelimiting
from nova.image import glance
+from nova.image import local
+from nova.image import service
from nova.tests import fake_flags
from nova.wsgi import Router
@@ -51,10 +54,11 @@ class FakeRouter(Router):
return res
-def fake_auth_init(self):
+def fake_auth_init(self, application):
self.db = FakeAuthDatabase()
self.context = Context()
self.auth = FakeAuthManager()
+ self.application = application
@webob.dec.wsgify
@@ -75,28 +79,28 @@ def stub_out_image_service(stubs):
def fake_image_show(meh, context, id):
return dict(kernelId=1, ramdiskId=1)
- stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show)
+ stubs.Set(local.LocalImageService, 'show', fake_image_show)
def stub_out_auth(stubs):
def fake_auth_init(self, app):
self.application = app
- stubs.Set(nova.api.openstack.AuthMiddleware,
+ stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fake_auth_init)
- stubs.Set(nova.api.openstack.AuthMiddleware,
+ stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__call__', fake_wsgi)
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
- super(nova.api.openstack.RateLimitingMiddleware, self).__init__(app)
+ super(ratelimiting.RateLimitingMiddleware, self).__init__(app)
self.application = app
- stubs.Set(nova.api.openstack.RateLimitingMiddleware,
+ stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
'__init__', fake_rate_init)
- stubs.Set(nova.api.openstack.RateLimitingMiddleware,
+ stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
'__call__', fake_wsgi)
@@ -173,7 +177,7 @@ class FakeToken(object):
class FakeRequestContext(object):
- def __init__(self, user, project):
+ def __init__(self, user, project, *args, **kwargs):
self.user_id = 1
self.project_id = 1
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index 7b427c2db..489a1dfbf 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -34,7 +34,7 @@ class Test(unittest.TestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
- self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager,
+ self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
fakes.FakeAuthManager.auth_data = {}
@@ -131,7 +131,7 @@ class Test(unittest.TestCase):
class TestLimiter(unittest.TestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
- self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager,
+ self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
fakes.FakeAuthManager.auth_data = {}
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index f610cbf9c..1b4031217 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -223,6 +223,20 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
res = req.get_response(nova.api.API('os'))
res_dict = json.loads(res.body)
+ def _is_equivalent_subset(x, y):
+ if set(x) <= set(y):
+ for k, v in x.iteritems():
+ if x[k] != y[k]:
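+                        # Tolerate the one expected mismatch: the API reports
+                        # status 'active' where the fixture says 'available'.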
+ if x[k] == 'active' and y[k] == 'available':
+ continue
+ return False
+ return True
+ return False
+
for image in res_dict['images']:
- self.assertEquals(1, self.IMAGE_FIXTURES.count(image),
- "image %s not in fixtures!" % str(image))
+ for image_fixture in self.IMAGE_FIXTURES:
+ if _is_equivalent_subset(image, image_fixture):
+ break
+ else:
+                self.fail("image %s not in fixtures!" %
+                          str(image))
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 3820f5f27..5d23db588 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -88,9 +88,13 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.db.api, 'instance_get_floating_address',
instance_address)
self.stubs.Set(nova.compute.api.ComputeAPI, 'pause',
- fake_compute_api)
+ fake_compute_api)
self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause',
- fake_compute_api)
+ fake_compute_api)
+ self.stubs.Set(nova.compute.api.ComputeAPI, 'suspend',
+ fake_compute_api)
+ self.stubs.Set(nova.compute.api.ComputeAPI, 'resume',
+ fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
def tearDown(self):
@@ -246,6 +250,30 @@ class ServersTest(unittest.TestCase):
res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status_int, 202)
+ def test_server_suspend(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank('/v1.0/servers/1/suspend')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(nova.api.API('os'))
+ self.assertEqual(res.status_int, 202)
+
+ def test_server_resume(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank('/v1.0/servers/1/resume')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(nova.api.API('os'))
+ self.assertEqual(res.status_int, 202)
+
def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
diff --git a/nova/tests/api/test.py b/nova/tests/api/test.py
new file mode 100644
index 000000000..9caa8c9d0
--- /dev/null
+++ b/nova/tests/api/test.py
@@ -0,0 +1,81 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test for the root WSGI middleware for all API controllers.
+"""
+
+import unittest
+
+import stubout
+import webob
+import webob.dec
+
+import nova.exception
+from nova import api
+from nova.tests.api.fakes import APIStub
+
+
+class Test(unittest.TestCase):
+
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+
+ def _request(self, url, subdomain, **kwargs):
+ environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain}
+ environ_keys.update(kwargs)
+ req = webob.Request.blank(url, environ_keys)
+ return req.get_response(api.API('ec2'))
+
+ def test_openstack(self):
+ self.stubs.Set(api.openstack, 'API', APIStub)
+ result = self._request('/v1.0/cloud', 'api')
+ self.assertEqual(result.body, "/cloud")
+
+ def test_ec2(self):
+ self.stubs.Set(api.ec2, 'API', APIStub)
+ result = self._request('/services/cloud', 'ec2')
+ self.assertEqual(result.body, "/cloud")
+
+ def test_not_found(self):
+ self.stubs.Set(api.ec2, 'API', APIStub)
+ self.stubs.Set(api.openstack, 'API', APIStub)
+ result = self._request('/test/cloud', 'ec2')
+ self.assertNotEqual(result.body, "/cloud")
+
+ def test_query_api_versions(self):
+ result = self._request('/', 'api')
+ self.assertTrue('CURRENT' in result.body)
+
+ def test_metadata(self):
+ def go(url):
+ result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2')
+ # Each should get to the ORM layer and fail to find the IP
+ self.assertRaises(nova.exception.NotFound, go, '/latest/')
+ self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/')
+ self.assertRaises(nova.exception.NotFound, go, '/1.0/')
+
+ def test_ec2_root(self):
+ result = self._request('/', 'ec2')
+ self.assertTrue('2007-12-15\n' in result.body)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/nova/tests/api_integration.py b/nova/tests/api_integration.py
deleted file mode 100644
index 54403c655..000000000
--- a/nova/tests/api_integration.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import boto
-from boto.ec2.regioninfo import RegionInfo
-import unittest
-
-
-ACCESS_KEY = 'fake'
-SECRET_KEY = 'fake'
-CLC_IP = '127.0.0.1'
-CLC_PORT = 8773
-REGION = 'test'
-
-
-def get_connection():
- return boto.connect_ec2(
- aws_access_key_id=ACCESS_KEY,
- aws_secret_access_key=SECRET_KEY,
- is_secure=False,
- region=RegionInfo(None, REGION, CLC_IP),
- port=CLC_PORT,
- path='/services/Cloud',
- debug=99)
-
-
-class APIIntegrationTests(unittest.TestCase):
- def test_001_get_all_images(self):
- conn = get_connection()
- res = conn.get_all_images()
-
-
-if __name__ == '__main__':
- unittest.main()
-
-#print conn.get_all_key_pairs()
-#print conn.create_key_pair
-#print conn.create_security_group('name', 'description')
diff --git a/nova/tests/db/__init__.py b/nova/tests/db/__init__.py
new file mode 100644
index 000000000..2d43aac42
--- /dev/null
+++ b/nova/tests/db/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`db` -- Stubs for DB API
+=============================
+"""
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
new file mode 100644
index 000000000..05bdd172e
--- /dev/null
+++ b/nova/tests/db/fakes.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Stubouts, mocks and fixtures for the test suite"""
+
+import time
+
+from nova import db
+from nova import utils
+from nova.compute import instance_types
+
+
+def stub_out_db_instance_api(stubs):
+ """ Stubs out the db API for creating Instances """
+
+ class FakeModel(object):
+        """ Stub model object backed by a dict of values """
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def fake_instance_create(values):
+ """ Stubs out the db.instance_create method """
+
+ type_data = instance_types.INSTANCE_TYPES[values['instance_type']]
+
+ base_options = {
+ 'name': values['name'],
+ 'id': values['id'],
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_id': values['image_id'],
+ 'kernel_id': values['kernel_id'],
+ 'ramdisk_id': values['ramdisk_id'],
+ 'state_description': 'scheduling',
+ 'user_id': values['user_id'],
+ 'project_id': values['project_id'],
+ 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+ 'instance_type': values['instance_type'],
+ 'memory_mb': type_data['memory_mb'],
+ 'mac_address': values['mac_address'],
+ 'vcpus': type_data['vcpus'],
+ 'local_gb': type_data['local_gb'],
+ }
+ return FakeModel(base_options)
+
+ def fake_network_get_by_instance(context, instance_id):
+ fields = {
+ 'bridge': 'xenbr0',
+ }
+ return FakeModel(fields)
+
+ stubs.Set(db, 'instance_create', fake_instance_create)
+ stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
diff --git a/nova/tests/access_unittest.py b/nova/tests/test_access.py
index 58fdea3b5..58fdea3b5 100644
--- a/nova/tests/access_unittest.py
+++ b/nova/tests/test_access.py
diff --git a/nova/tests/api_unittest.py b/nova/tests/test_api.py
index 33d4cb294..33d4cb294 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/test_api.py
diff --git a/nova/tests/auth_unittest.py b/nova/tests/test_auth.py
index 15d40bc53..15d40bc53 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/test_auth.py
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/test_cloud.py
index 8e3391226..8e3391226 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/test_cloud.py
diff --git a/nova/tests/compute_unittest.py b/nova/tests/test_compute.py
index 529847972..11be81123 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/test_compute.py
@@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info("Running instances: %s", instances)
+ logging.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info("After terminating instances: %s", instances)
+ logging.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
@@ -136,6 +136,14 @@ class ComputeTestCase(test.TestCase):
self.compute.unpause_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
+ def test_suspend(self):
+ """ensure instance can be suspended"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.suspend_instance(self.context, instance_id)
+ self.compute.resume_instance(self.context, instance_id)
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_reboot(self):
"""Ensure instance can be rebooted"""
instance_id = self._create_instance()
diff --git a/nova/tests/flags_unittest.py b/nova/tests/test_flags.py
index 707300fcf..707300fcf 100644
--- a/nova/tests/flags_unittest.py
+++ b/nova/tests/test_flags.py
diff --git a/nova/tests/middleware_unittest.py b/nova/tests/test_middleware.py
index 0febf52d6..0febf52d6 100644
--- a/nova/tests/middleware_unittest.py
+++ b/nova/tests/test_middleware.py
diff --git a/nova/tests/misc_unittest.py b/nova/tests/test_misc.py
index 3d947427a..33c1777d5 100644
--- a/nova/tests/misc_unittest.py
+++ b/nova/tests/test_misc.py
@@ -22,13 +22,13 @@ from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
- if os.path.exists('../.bzr'):
+ if os.path.exists('.bzr'):
contributors = set()
- mailmap = parse_mailmap('../.mailmap')
+ mailmap = parse_mailmap('.mailmap')
import bzrlib.workingtree
- tree = bzrlib.workingtree.WorkingTree.open('..')
+ tree = bzrlib.workingtree.WorkingTree.open('.')
tree.lock_read()
try:
parents = tree.get_parent_ids()
@@ -42,7 +42,7 @@ class ProjectTestCase(test.TestCase):
email = author.split(' ')[-1]
contributors.add(str_dict_replace(email, mailmap))
- authors_file = open('../Authors', 'r').read()
+ authors_file = open('Authors', 'r').read()
missing = set()
for contributor in contributors:
diff --git a/nova/tests/network_unittest.py b/nova/tests/test_network.py
index 96473ac7c..96473ac7c 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/test_network.py
diff --git a/nova/tests/quota_unittest.py b/nova/tests/test_quota.py
index 8cf2a5e54..8cf2a5e54 100644
--- a/nova/tests/quota_unittest.py
+++ b/nova/tests/test_quota.py
diff --git a/nova/tests/rpc_unittest.py b/nova/tests/test_rpc.py
index 6ea2edcab..6ea2edcab 100644
--- a/nova/tests/rpc_unittest.py
+++ b/nova/tests/test_rpc.py
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/test_scheduler.py
index df5e7afa5..91517cc5d 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/test_scheduler.py
@@ -48,7 +48,7 @@ class SchedulerTestCase(test.TestCase):
"""Test case for scheduler"""
def setUp(self):
super(SchedulerTestCase, self).setUp()
- self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver')
+ self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
def test_fallback(self):
scheduler = manager.SchedulerManager()
diff --git a/nova/tests/service_unittest.py b/nova/tests/test_service.py
index 47c092f8e..b30838ad7 100644
--- a/nova/tests/service_unittest.py
+++ b/nova/tests/test_service.py
@@ -30,7 +30,7 @@ from nova import service
from nova import manager
FLAGS = flags.FLAGS
-flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
+flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager",
"Manager for testing")
@@ -52,14 +52,14 @@ class ServiceManagerTestCase(test.TestCase):
serv = service.Service('test',
'test',
'test',
- 'nova.tests.service_unittest.FakeManager')
+ 'nova.tests.test_service.FakeManager')
self.assertRaises(AttributeError, getattr, serv, 'test_method')
def test_message_gets_to_manager(self):
serv = service.Service('test',
'test',
'test',
- 'nova.tests.service_unittest.FakeManager')
+ 'nova.tests.test_service.FakeManager')
serv.start()
self.assertEqual(serv.test_method(), 'manager')
@@ -67,7 +67,7 @@ class ServiceManagerTestCase(test.TestCase):
serv = ExtendedService('test',
'test',
'test',
- 'nova.tests.service_unittest.FakeManager')
+ 'nova.tests.test_service.FakeManager')
serv.start()
self.assertEqual(serv.test_method(), 'service')
@@ -156,7 +156,7 @@ class ServiceTestCase(test.TestCase):
serv = service.Service(host,
binary,
topic,
- 'nova.tests.service_unittest.FakeManager')
+ 'nova.tests.test_service.FakeManager')
serv.start()
serv.report_state()
@@ -186,7 +186,7 @@ class ServiceTestCase(test.TestCase):
serv = service.Service(host,
binary,
topic,
- 'nova.tests.service_unittest.FakeManager')
+ 'nova.tests.test_service.FakeManager')
serv.start()
serv.report_state()
self.assert_(serv.model_disconnected)
@@ -219,7 +219,7 @@ class ServiceTestCase(test.TestCase):
serv = service.Service(host,
binary,
topic,
- 'nova.tests.service_unittest.FakeManager')
+ 'nova.tests.test_service.FakeManager')
serv.start()
serv.model_disconnected = True
serv.report_state()
diff --git a/nova/tests/twistd_unittest.py b/nova/tests/test_twistd.py
index 75007b9c8..75007b9c8 100644
--- a/nova/tests/twistd_unittest.py
+++ b/nova/tests/test_twistd.py
diff --git a/nova/tests/virt_unittest.py b/nova/tests/test_virt.py
index 9ad009510..4aa489d08 100644
--- a/nova/tests/virt_unittest.py
+++ b/nova/tests/test_virt.py
@@ -33,6 +33,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
+ libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
@@ -53,39 +54,37 @@ class LibvirtConnTestCase(test.TestCase):
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
- self.do_test_xml_and_uri(instance_data,
- expect_kernel=False, expect_ramdisk=False)
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
- self.do_test_xml_and_uri(instance_data,
- expect_kernel=True, expect_ramdisk=False)
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
- self.do_test_xml_and_uri(instance_data,
- expect_kernel=False, expect_ramdisk=False)
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
- self.do_test_xml_and_uri(instance_data,
- expect_kernel=True, expect_ramdisk=True)
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
- self.do_test_xml_and_uri(instance_data,
- expect_kernel=True, expect_ramdisk=True,
- rescue=True)
+ self._check_xml_and_uri(instance_data, expect_kernel=True,
+ expect_ramdisk=True, rescue=True)
- def do_test_xml_and_uri(self, instance,
- expect_ramdisk, expect_kernel,
- rescue=False):
+ def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
+ rescue=False):
user_context = context.RequestContext(project=self.project,
user=self.user)
instance_ref = db.instance_create(user_context, instance)
@@ -159,7 +158,6 @@ class LibvirtConnTestCase(test.TestCase):
(lambda t: t.find('./devices/serial/source').get(
'path').split('/')[1], 'console.log'),
(lambda t: t.find('./memory').text, '2097152')]
-
if rescue:
common_checks += [
(lambda t: t.findall('./devices/disk/source')[0].get(
diff --git a/nova/tests/volume_unittest.py b/nova/tests/test_volume.py
index b13455fb0..b13455fb0 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/test_volume.py
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
new file mode 100644
index 000000000..ed2e4ffde
--- /dev/null
+++ b/nova/tests/test_xenapi.py
@@ -0,0 +1,220 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for XenAPI
+"""
+
+import stubout
+
+from nova import db
+from nova import context
+from nova import flags
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.compute import instance_types
+from nova.compute import power_state
+from nova.virt import xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import volume_utils
+from nova.tests.db import fakes
+from nova.tests.xenapi import stubs
+
+FLAGS = flags.FLAGS
+
+
+class XenAPIVolumeTestCase(test.TestCase):
+ """
+ Unit tests for Volume operations
+ """
+ def setUp(self):
+ super(XenAPIVolumeTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ FLAGS.target_host = '127.0.0.1'
+ FLAGS.xenapi_connection_url = 'test_url'
+ FLAGS.xenapi_connection_password = 'test_pass'
+ fakes.stub_out_db_instance_api(self.stubs)
+ stubs.stub_out_get_target(self.stubs)
+ fake.reset()
+ self.values = {'name': 1, 'id': 1,
+ 'project_id': 'fake',
+ 'user_id': 'fake',
+ 'image_id': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ }
+
+ def _create_volume(self, size='0'):
+ """Create a volume object."""
+ vol = {}
+ vol['size'] = size
+ vol['user_id'] = 'fake'
+ vol['project_id'] = 'fake'
+ vol['host'] = 'localhost'
+ vol['availability_zone'] = FLAGS.storage_availability_zone
+ vol['status'] = "creating"
+ vol['attach_status'] = "detached"
+ return db.volume_create(context.get_admin_context(), vol)
+
+ def test_create_iscsi_storage(self):
+ """ This shows how to test helper classes' methods """
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ helper = volume_utils.VolumeHelper
+ helper.XenAPI = session.get_imported_xenapi()
+ vol = self._create_volume()
+ info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc')
+ label = 'SR-%s' % vol['ec2_id']
+ description = 'Test-SR'
+ sr_ref = helper.create_iscsi_storage(session, info, label, description)
+ srs = fake.get_all('SR')
+ self.assertEqual(sr_ref, srs[0])
+ db.volume_destroy(context.get_admin_context(), vol['id'])
+
+ def test_parse_volume_info_raise_exception(self):
+ """ This shows how to test helper classes' methods """
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ helper = volume_utils.VolumeHelper
+ helper.XenAPI = session.get_imported_xenapi()
+ vol = self._create_volume()
+ # oops, wrong mount point!
+ self.assertRaises(volume_utils.StorageError,
+ helper.parse_volume_info,
+ vol['ec2_id'],
+ '/dev/sd')
+ db.volume_destroy(context.get_admin_context(), vol['id'])
+
+ def test_attach_volume(self):
+ """ This shows how to test Ops classes' methods """
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
+ conn = xenapi_conn.get_connection(False)
+ volume = self._create_volume()
+ instance = db.instance_create(self.values)
+ fake.create_vm(instance.name, 'Running')
+ result = conn.attach_volume(instance.name, volume['ec2_id'],
+ '/dev/sdc')
+
+ def check():
+ # check that the VM has a VBD attached to it
+ # Get XenAPI reference for the VM
+ vms = fake.get_all('VM')
+ # Get XenAPI record for VBD
+ vbds = fake.get_all('VBD')
+ vbd = fake.get_record('VBD', vbds[0])
+ vm_ref = vbd['VM']
+ self.assertEqual(vm_ref, vms[0])
+
+ check()
+
+ def test_attach_volume_raise_exception(self):
+ """ This shows how to test when exceptions are raised """
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForVolumeFailedTests)
+ conn = xenapi_conn.get_connection(False)
+ volume = self._create_volume()
+ instance = db.instance_create(self.values)
+ fake.create_vm(instance.name, 'Running')
+ self.assertRaises(Exception,
+ conn.attach_volume,
+ instance.name,
+ volume['ec2_id'],
+ '/dev/sdc')
+
+ def tearDown(self):
+ super(XenAPIVolumeTestCase, self).tearDown()
+ self.stubs.UnsetAll()
+
+
+class XenAPIVMTestCase(test.TestCase):
+ """
+ Unit tests for VM operations
+ """
+ def setUp(self):
+ super(XenAPIVMTestCase, self).setUp()
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.stubs = stubout.StubOutForTesting()
+ FLAGS.xenapi_connection_url = 'test_url'
+ FLAGS.xenapi_connection_password = 'test_pass'
+ fake.reset()
+ fakes.stub_out_db_instance_api(self.stubs)
+ fake.create_network('fake', FLAGS.flat_network_bridge)
+
+ def test_list_instances_0(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ conn = xenapi_conn.get_connection(False)
+ instances = conn.list_instances()
+ self.assertEquals(instances, [])
+
+ def test_spawn(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ values = {'name': 1, 'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ }
+ conn = xenapi_conn.get_connection(False)
+ instance = db.instance_create(values)
+ conn.spawn(instance)
+
+ def check():
+ instances = conn.list_instances()
+ self.assertEquals(instances, [1])
+
+ # Get Nova record for VM
+ vm_info = conn.get_info(1)
+
+ # Get XenAPI record for VM
+ vms = fake.get_all('VM')
+ vm = fake.get_record('VM', vms[0])
+
+ # Check that m1.large above turned into the right thing.
+ instance_type = instance_types.INSTANCE_TYPES['m1.large']
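+            # memory_mb shifted left by 10 gives KiB (what get_info reports);
+            # shifted again it gives bytes (what the VM record stores).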
+ mem_kib = long(instance_type['memory_mb']) << 10
+ mem_bytes = str(mem_kib << 10)
+ vcpus = instance_type['vcpus']
+ self.assertEquals(vm_info['max_mem'], mem_kib)
+ self.assertEquals(vm_info['mem'], mem_kib)
+ self.assertEquals(vm['memory_static_max'], mem_bytes)
+ self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
+ self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
+ self.assertEquals(vm['VCPUs_max'], str(vcpus))
+ self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
+
+ # Check that the VM is running according to Nova
+ self.assertEquals(vm_info['state'], power_state.RUNNING)
+
+ # Check that the VM is running according to XenAPI.
+ self.assertEquals(vm['power_state'], 'Running')
+
+ check()
+
+ def tearDown(self):
+ super(XenAPIVMTestCase, self).tearDown()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ self.stubs.UnsetAll()
diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py
new file mode 100644
index 000000000..1dd02bdc1
--- /dev/null
+++ b/nova/tests/xenapi/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`xenapi` -- Stubs for XenAPI
+=================================
+"""
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
new file mode 100644
index 000000000..a7e592fee
--- /dev/null
+++ b/nova/tests/xenapi/stubs.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Stubouts, mocks and fixtures for the test suite"""
+
+from nova.virt import xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import volume_utils
+
+
+def stubout_session(stubs, cls):
+ """Stubs out two methods from XenAPISession"""
+ def fake_import(self):
+ """Stubs out get_imported_xenapi of XenAPISession"""
+ fake_module = 'nova.virt.xenapi.fake'
+ from_list = ['fake']
+ return __import__(fake_module, globals(), locals(), from_list, -1)
+
+ stubs.Set(xenapi_conn.XenAPISession, '_create_session',
+ lambda s, url: cls(url))
+ stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi',
+ fake_import)
+
+
+def stub_out_get_target(stubs):
+ """Stubs out _get_target in volume_utils"""
+ def fake_get_target(volume_id):
+ return (None, None)
+
+ stubs.Set(volume_utils, '_get_target', fake_get_target)
+
+
+class FakeSessionForVMTests(fake.SessionBase):
+ """ Stubs out a XenAPISession for VM tests """
+ def __init__(self, uri):
+ super(FakeSessionForVMTests, self).__init__(uri)
+
+ def network_get_all_records_where(self, _1, _2):
+ return self.xenapi.network.get_all_records()
+
+ def host_call_plugin(self, _1, _2, _3, _4, _5):
+ return ''
+
+ def VM_start(self, _1, ref, _2, _3):
+ vm = fake.get_record('VM', ref)
+ if vm['power_state'] != 'Halted':
+ raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
+ vm['power_state']])
+ vm['power_state'] = 'Running'
+ vm['is_a_template'] = False
+ vm['is_control_domain'] = False
+
+
+class FakeSessionForVolumeTests(fake.SessionBase):
+ """ Stubs out a XenAPISession for Volume tests """
+ def __init__(self, uri):
+ super(FakeSessionForVolumeTests, self).__init__(uri)
+
+ def VBD_plug(self, _1, ref):
+ rec = fake.get_record('VBD', ref)
+ rec['currently-attached'] = True
+
+ def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
+ _6, _7, _8, _9, _10, _11):
+ valid_vdi = False
+ refs = fake.get_all('VDI')
+ for ref in refs:
+ rec = fake.get_record('VDI', ref)
+ if rec['uuid'] == uuid:
+ valid_vdi = True
+ if not valid_vdi:
+ raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+
+class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
+ """ Stubs out a XenAPISession for Volume tests: it injects failures """
+ def __init__(self, uri):
+ super(FakeSessionForVolumeFailedTests, self).__init__(uri)
+
+ def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
+ _6, _7, _8, _9, _10, _11):
+ # This is for testing failure
+ raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+ def PBD_unplug(self, _1, ref):
+ rec = fake.get_record('PBD', ref)
+ rec['currently-attached'] = False
+
+ def SR_forget(self, _1, ref):
+ pass
diff --git a/nova/utils.py b/nova/utils.py
index bbc55627c..8cbf59b2d 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -48,7 +48,8 @@ def import_class(import_str):
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
- except (ImportError, ValueError, AttributeError):
+ except (ImportError, ValueError, AttributeError), exc:
+ logging.debug(_('Inner Exception: %s'), exc)
raise exception.NotFound(_('Class %s cannot be found') % class_str)
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index b97dbd511..a700e35c1 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -148,6 +148,18 @@ class FakeConnection(object):
"""
pass
+ def suspend(self, instance, callback):
+ """
+ suspend the specified instance
+ """
+ pass
+
+ def resume(self, instance, callback):
+ """
+ resume the specified instance
+ """
+ pass
+
def destroy(self, instance):
"""
Destroy (shutdown and delete) the specified instance.
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 40a430d86..dc878846d 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -63,10 +63,9 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
-from Cheetah.Template import Template
-
libvirt = None
libxml2 = None
+Template = None
FLAGS = flags.FLAGS
@@ -96,15 +95,26 @@ flags.DEFINE_string('ajaxterm_portrange',
def get_connection(read_only):
# These are loaded late so that there's no need to install these
# libraries when not using libvirt.
+ # Cheetah is separate because the unit tests want to load Cheetah,
+ # but not libvirt.
global libvirt
global libxml2
if libvirt is None:
libvirt = __import__('libvirt')
if libxml2 is None:
libxml2 = __import__('libxml2')
+ _late_load_cheetah()
return LibvirtConnection(read_only)
+def _late_load_cheetah():
+ global Template
+ if Template is None:
+ t = __import__('Cheetah.Template', globals(), locals(), ['Template'],
+ -1)
+ Template = t.Template
+
+
def _get_net_and_mask(cidr):
net = IPy.IP(cidr)
return str(net.net()), str(net.netmask())
@@ -288,6 +298,14 @@ class LibvirtConnection(object):
raise exception.APIError("unpause not supported for libvirt.")
@exception.wrap_exception
+ def suspend(self, instance, callback):
+ raise exception.APIError("suspend not supported for libvirt")
+
+ @exception.wrap_exception
+ def resume(self, instance, callback):
+ raise exception.APIError("resume not supported for libvirt")
+
+ @exception.wrap_exception
def rescue(self, instance):
self.destroy(instance, False)
@@ -559,9 +577,10 @@ class LibvirtConnection(object):
if FLAGS.allow_project_net_traffic:
net, mask = _get_net_and_mask(network['cidr'])
- extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" value=\"%s\" />\n"
- ) % (net, mask)
+ extra_params = ("<parameter name=\"PROJNET\" "
+ "value=\"%s\" />\n"
+ "<parameter name=\"PROJMASK\" "
+ "value=\"%s\" />\n") % (net, mask)
else:
extra_params = "\n"
@@ -847,8 +866,8 @@ class NWFilterFirewall(object):
the base filter are all in place.
"""
- nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n"
- ) % instance['name']
+ nwfilter_xml = ("<filter name='nova-instance-%s' "
+ "chain='root'>\n") % instance['name']
if instance['image_id'] == FLAGS.vpn_image_id:
nwfilter_xml += " <filterref filter='nova-vpn' />\n"
@@ -861,8 +880,8 @@ class NWFilterFirewall(object):
for security_group in instance.security_groups:
self.ensure_security_group_filter(security_group['id'])
- nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
- ) % security_group['id']
+ nwfilter_xml += (" <filterref filter='nova-secgroup-%d' "
+ "/>\n") % security_group['id']
nwfilter_xml += "</filter>"
self._define_filter(nwfilter_xml)
diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py
index 3d598c463..c75162f08 100644
--- a/nova/virt/xenapi/__init__.py
+++ b/nova/virt/xenapi/__init__.py
@@ -13,3 +13,18 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+"""
+:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI
+==================================================================
+"""
+
+
+class HelperBase(object):
+ """
+ The base for helper classes. This adds the XenAPI class attribute
+ """
+ XenAPI = None
+
+ def __init__(self):
+ return
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
new file mode 100644
index 000000000..1eaf31c25
--- /dev/null
+++ b/nova/virt/xenapi/fake.py
@@ -0,0 +1,389 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+#============================================================================
+#
+# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
+# interface included in the Python distribution.
+#
+# Copyright (c) 1999-2002 by Secret Labs AB
+# Copyright (c) 1999-2002 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+
+"""
+A fake XenAPI SDK.
+"""
+
+
+import datetime
+import logging
+import uuid
+
+from nova import exception
+
+
+_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
+ 'PBD', 'VDI', 'VIF', 'VM', 'task']
+
+_db_content = {}
+
+
+def reset():
+ for c in _CLASSES:
+ _db_content[c] = {}
+ create_host('fake')
+
+
+def create_host(name_label):
+ return _create_object('host', {
+ 'name_label': name_label,
+ })
+
+
+def create_network(name_label, bridge):
+ return _create_object('network', {
+ 'name_label': name_label,
+ 'bridge': bridge,
+ })
+
+
+def create_vm(name_label, status,
+ is_a_template=False, is_control_domain=False):
+ return _create_object('VM', {
+ 'name_label': name_label,
+ 'power-state': status,
+ 'is_a_template': is_a_template,
+ 'is_control_domain': is_control_domain,
+ })
+
+
+def create_vdi(name_label, read_only, sr_ref, sharable):
+ return _create_object('VDI', {
+ 'name_label': name_label,
+ 'read_only': read_only,
+ 'SR': sr_ref,
+ 'type': '',
+ 'name_description': '',
+ 'sharable': sharable,
+ 'other_config': {},
+ 'location': '',
+ 'xenstore_data': '',
+ 'sm_config': {},
+ 'VBDs': {},
+ })
+
+
+def create_pbd(config, sr_ref, attached):
+ return _create_object('PBD', {
+ 'device-config': config,
+ 'SR': sr_ref,
+ 'currently-attached': attached,
+ })
+
+
+def create_task(name_label):
+ return _create_object('task', {
+ 'name_label': name_label,
+ 'status': 'pending',
+ })
+
+
+def _create_object(table, obj):
+ ref = str(uuid.uuid4())
+ obj['uuid'] = str(uuid.uuid4())
+ _db_content[table][ref] = obj
+ return ref
+
+
+def _create_sr(table, obj):
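+    # obj is the positional parameter list passed to SR.create; the SR type
+    # sits at index 6 and the record used for the fake SR at index 2.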
+ sr_type = obj[6]
+ # Forces fake to support iscsi only
+ if sr_type != 'iscsi':
+ raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
+ sr_ref = _create_object(table, obj[2])
+ vdi_ref = create_vdi('', False, sr_ref, False)
+ pbd_ref = create_pbd('', sr_ref, True)
+ _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
+ _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+ _db_content['VDI'][vdi_ref]['SR'] = sr_ref
+ _db_content['PBD'][pbd_ref]['SR'] = sr_ref
+ return sr_ref
+
+
+def get_all(table):
+ return _db_content[table].keys()
+
+
+def get_all_records(table):
+ return _db_content[table]
+
+
+def get_record(table, ref):
+ if ref in _db_content[table]:
+ return _db_content[table].get(ref)
+ else:
+ raise Failure(['HANDLE_INVALID', table, ref])
+
+
+def check_for_session_leaks():
+ if len(_db_content['session']) > 0:
+ raise exception.Error('Sessions have leaked: %s' %
+ _db_content['session'])
+
+
+class Failure(Exception):
+ def __init__(self, details):
+ self.details = details
+
+ def __str__(self):
+ try:
+ return str(self.details)
+ except Exception, exc:
+ return "XenAPI Fake Failure: %s" % str(self.details)
+
+ def _details_map(self):
+ return dict([(str(i), self.details[i])
+ for i in range(len(self.details))])
+
+
+class SessionBase(object):
+ """
+ Base class for Fake Sessions
+ """
+
+ def __init__(self, uri):
+ self._session = None
+
+ def xenapi_request(self, methodname, params):
+ if methodname.startswith('login'):
+ self._login(methodname, params)
+ return None
+ elif methodname == 'logout' or methodname == 'session.logout':
+ self._logout()
+ return None
+ else:
+ full_params = (self._session,) + params
+ meth = getattr(self, methodname, None)
+ if meth is None:
+ logging.warn('Raising NotImplemented')
+ raise NotImplementedError(
+ 'xenapi.fake does not have an implementation for %s' %
+ methodname)
+ return meth(*full_params)
+
+ def _login(self, method, params):
+ self._session = str(uuid.uuid4())
+ _db_content['session'][self._session] = {
+ 'uuid': str(uuid.uuid4()),
+ 'this_host': _db_content['host'].keys()[0],
+ }
+
+ def _logout(self):
+ s = self._session
+ self._session = None
+ if s not in _db_content['session']:
+ raise exception.Error(
+ "Logging out a session that is invalid or already logged "
+ "out: %s" % s)
+ del _db_content['session'][s]
+
+ def __getattr__(self, name):
+ if name == 'handle':
+ return self._session
+ elif name == 'xenapi':
+ return _Dispatcher(self.xenapi_request, None)
+ elif name.startswith('login') or name.startswith('slave_local'):
+ return lambda *params: self._login(name, params)
+ elif name.startswith('Async'):
+ return lambda *params: self._async(name, params)
+ elif '.' in name:
+ impl = getattr(self, name.replace('.', '_'))
+ if impl is not None:
+
+ def callit(*params):
+ logging.warn('Calling %s %s', name, impl)
+ self._check_session(params)
+ return impl(*params)
+ return callit
+ if self._is_gettersetter(name, True):
+ logging.warn('Calling getter %s', name)
+ return lambda *params: self._getter(name, params)
+ elif self._is_create(name):
+ return lambda *params: self._create(name, params)
+ else:
+ return None
+
+ def _is_gettersetter(self, name, getter):
+ bits = name.split('.')
+ return (len(bits) == 2 and
+ bits[0] in _CLASSES and
+ bits[1].startswith(getter and 'get_' or 'set_'))
+
+ def _is_create(self, name):
+ bits = name.split('.')
+ return (len(bits) == 2 and
+ bits[0] in _CLASSES and
+ bits[1] == 'create')
+
+ def _getter(self, name, params):
+ self._check_session(params)
+ (cls, func) = name.split('.')
+
+ if func == 'get_all':
+ self._check_arg_count(params, 1)
+ return get_all(cls)
+
+ if func == 'get_all_records':
+ self._check_arg_count(params, 1)
+ return get_all_records(cls)
+
+ if func == 'get_record':
+ self._check_arg_count(params, 2)
+ return get_record(cls, params[1])
+
+ if (func == 'get_by_name_label' or
+ func == 'get_by_uuid'):
+ self._check_arg_count(params, 2)
+ return self._get_by_field(
+ _db_content[cls], func[len('get_by_'):], params[1])
+
+ if len(params) == 2:
+ field = func[len('get_'):]
+ ref = params[1]
+
+ if (ref in _db_content[cls] and
+ field in _db_content[cls][ref]):
+ return _db_content[cls][ref][field]
+
+ logging.error('Raising NotImplemented')
+ raise NotImplementedError(
+ 'xenapi.fake does not have an implementation for %s or it has '
+ 'been called with the wrong number of arguments' % name)
+
+ def _setter(self, name, params):
+ self._check_session(params)
+ (cls, func) = name.split('.')
+
+ if len(params) == 3:
+ field = func[len('set_'):]
+ ref = params[1]
+ val = params[2]
+
+ if (ref in _db_content[cls] and
+ field in _db_content[cls][ref]):
+                _db_content[cls][ref][field] = val
+                return
+
+ logging.warn('Raising NotImplemented')
+ raise NotImplementedError(
+ 'xenapi.fake does not have an implementation for %s or it has '
+ 'been called with the wrong number of arguments or the database '
+ 'is missing that field' % name)
+
+ def _create(self, name, params):
+ self._check_session(params)
+ is_sr_create = name == 'SR.create'
+ # Storage Repositories have a different API
+ expected = is_sr_create and 10 or 2
+ self._check_arg_count(params, expected)
+ (cls, _) = name.split('.')
+ ref = is_sr_create and \
+ _create_sr(cls, params) or _create_object(cls, params[1])
+ obj = get_record(cls, ref)
+
+ # Add RO fields
+ if cls == 'VM':
+ obj['power_state'] = 'Halted'
+
+ return ref
+
+ def _async(self, name, params):
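+        # The fake executes Async.* calls synchronously, recording the result
+        # or failure details on a freshly created fake task.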
+ task_ref = create_task(name)
+ task = _db_content['task'][task_ref]
+ func = name[len('Async.'):]
+ try:
+ task['result'] = self.xenapi_request(func, params[1:])
+ task['status'] = 'success'
+ except Failure, exc:
+ task['error_info'] = exc.details
+ task['status'] = 'failed'
+ task['finished'] = datetime.datetime.now()
+ return task_ref
+
+ def _check_session(self, params):
+ if (self._session is None or
+ self._session not in _db_content['session']):
+ raise Failure(['HANDLE_INVALID', 'session', self._session])
+ if len(params) == 0 or params[0] != self._session:
+ logging.warn('Raising NotImplemented')
+ raise NotImplementedError('Call to XenAPI without using .xenapi')
+
+ def _check_arg_count(self, params, expected):
+ actual = len(params)
+ if actual != expected:
+ raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
+ expected, actual])
+
+ def _get_by_field(self, recs, k, v):
+ result = []
+ for ref, rec in recs.iteritems():
+ if rec.get(k) == v:
+ result.append(ref)
+ return result
+
+
+# Based upon _Method from xmlrpclib.
+class _Dispatcher:
+ def __init__(self, send, name):
+ self.__send = send
+ self.__name = name
+
+ def __repr__(self):
+ if self.__name:
+ return '<xenapi.fake._Dispatcher for %s>' % self.__name
+ else:
+ return '<xenapi.fake._Dispatcher>'
+
+ def __getattr__(self, name):
+ if self.__name is None:
+ return _Dispatcher(self.__send, name)
+ else:
+ return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
+
+ def __call__(self, *args):
+ return self.__send(self.__name, args)
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py
index ce2c68ce0..c0406d8f0 100644
--- a/nova/virt/xenapi/network_utils.py
+++ b/nova/virt/xenapi/network_utils.py
@@ -21,22 +21,23 @@ their lookup functions.
"""
-class NetworkHelper():
+from nova.virt.xenapi import HelperBase
+
+
+class NetworkHelper(HelperBase):
"""
The class that wraps the helper methods together.
"""
- def __init__(self):
- return
-
@classmethod
def find_network_with_bridge(cls, session, bridge):
- """ Return the network on which the bridge is attached, if found."""
+ """Return the network on which the bridge is attached, if found."""
expr = 'field "bridge" = "%s"' % bridge
networks = session.call_xenapi('network.get_all_records_where', expr)
if len(networks) == 1:
return networks.keys()[0]
elif len(networks) > 1:
- raise Exception('Found non-unique network for bridge %s' % bridge)
+ raise Exception(_('Found non-unique network'
+ ' for bridge %s') % bridge)
else:
- raise Exception('Found no network for bridge %s' % bridge)
+ raise Exception(_('Found no network for bridge %s') % bridge)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index badaaedc1..47fb6db53 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -23,12 +23,14 @@ import logging
import urllib
from xml.dom import minidom
+from nova import exception
from nova import flags
-from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
+from nova.virt.xenapi import HelperBase
+from nova.virt.xenapi.volume_utils import StorageError
FLAGS = flags.FLAGS
@@ -37,35 +39,34 @@ XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
- 'Suspended': power_state.SHUTDOWN, # FIXME
+ 'Suspended': power_state.SUSPENDED,
'Crashed': power_state.CRASHED}
-XenAPI = None
+class ImageType:
+ """
+ Enumeration class for distinguishing different image types
+ 0 - kernel/ramdisk image (goes on dom0's filesystem)
+ 1 - disk image (local SR, partitioned by objectstore plugin)
+ 2 - raw disk image (local SR, NOT partitioned by plugin)
+ """
+
+ KERNEL_RAMDISK = 0
+ DISK = 1
+ DISK_RAW = 2
-class VMHelper():
+
+class VMHelper(HelperBase):
"""
The class that wraps the helper methods together.
"""
- def __init__(self):
- return
-
- @classmethod
- def late_import(cls):
- """
- Load the XenAPI module in for helper class, if required.
- This is to avoid to install the XenAPI library when other
- hypervisors are used
- """
- global XenAPI
- if XenAPI is None:
- XenAPI = __import__('XenAPI')
-
@classmethod
- def create_vm(cls, session, instance, kernel, ramdisk):
+ def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
"""Create a VM record. Returns a Deferred that gives the new
- VM reference."""
+ VM reference.
+        The pv_kernel flag indicates whether the guest is HVM or PV.
+ """
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
@@ -85,9 +86,9 @@ class VMHelper():
'actions_after_reboot': 'restart',
'actions_after_crash': 'destroy',
'PV_bootloader': '',
- 'PV_kernel': kernel,
- 'PV_ramdisk': ramdisk,
- 'PV_args': 'root=/dev/xvda1',
+ 'PV_kernel': '',
+ 'PV_ramdisk': '',
+ 'PV_args': '',
'PV_bootloader_args': '',
'PV_legacy_args': '',
'HVM_boot_policy': '',
@@ -99,16 +100,33 @@ class VMHelper():
'user_version': '0',
'other_config': {},
}
+        # Complete the VM configuration record according to the image type:
+        # non-raw, raw with a PV kernel, or raw booted in HVM mode.
+ if instance.kernel_id:
+ rec['PV_bootloader'] = ''
+ rec['PV_kernel'] = kernel
+ rec['PV_ramdisk'] = ramdisk
+ rec['PV_args'] = 'root=/dev/xvda1'
+ rec['PV_bootloader_args'] = ''
+ rec['PV_legacy_args'] = ''
+ else:
+ if pv_kernel:
+ rec['PV_args'] = 'noninteractive'
+ rec['PV_bootloader'] = 'pygrub'
+ else:
+ rec['HVM_boot_policy'] = 'BIOS order'
+ rec['HVM_boot_params'] = {'order': 'dc'}
+ rec['platform'] = {'acpi': 'true', 'apic': 'true',
+ 'pae': 'true', 'viridian': 'true'}
logging.debug('Created VM %s...', instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- logging.debug('Created VM %s as %s.', instance.name, vm_ref)
+ logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
return vm_ref
@classmethod
def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
-
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
@@ -122,17 +140,53 @@ class VMHelper():
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
+ logging.debug(_('Creating VBD for VM %s, VDI %s ... '),
+ vm_ref, vdi_ref)
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
+ logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
vdi_ref)
return vbd_ref
@classmethod
+ def find_vbd_by_number(cls, session, vm_ref, number):
+ """Get the VBD reference from the device number"""
+ vbds = session.get_xenapi().VM.get_VBDs(vm_ref)
+ if vbds:
+ for vbd in vbds:
+ try:
+ vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+ if vbd_rec['userdevice'] == str(number):
+ return vbd
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('VBD not found in instance %s') % vm_ref)
+
+ @classmethod
+ def unplug_vbd(cls, session, vbd_ref):
+ """Unplug VBD from VM"""
+ try:
+ vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref)
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
+ raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)
+
+ @classmethod
+ def destroy_vbd(cls, session, vbd_ref):
+ """Destroy VBD from host database"""
+ try:
+ task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
+ #FIXME(armando): find a solution to missing instance_id
+ #with Josh Kearney
+ session.wait_for_task(0, task)
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
+
+ @classmethod
def create_vif(cls, session, vm_ref, network_ref, mac_address):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
-
vif_rec = {}
vif_rec['device'] = '0'
vif_rec['network'] = network_ref
@@ -142,59 +196,69 @@ class VMHelper():
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
+ logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
network_ref)
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
+ logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
vm_ref, network_ref)
return vif_ref
@classmethod
- def fetch_image(cls, session, image, user, project, use_sr):
- """use_sr: True to put the image as a VDI in an SR, False to place
- it on dom0's filesystem. The former is for VM disks, the latter for
- its kernel and ramdisk (if external kernels are being used).
- Returns a Deferred that gives the new VDI UUID."""
-
+ def fetch_image(cls, session, image, user, project, type):
+ """
+ type is interpreted as an ImageType instance
+ """
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
logging.debug("Asking xapi to fetch %s as %s", url, access)
- fn = use_sr and 'get_vdi' or 'get_kernel'
+ fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = user.secret
- if use_sr:
+ args['add_partition'] = 'false'
+ args['raw'] = 'false'
+ if type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
+ if type == ImageType.DISK_RAW:
+ args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid = session.wait_for_task(task)
+ #FIXME(armando): find a solution to missing instance_id
+ #with Josh Kearney
+ uuid = session.wait_for_task(0, task)
return uuid
@classmethod
- def lookup(cls, session, i):
- """ Look the instance i up, and returns it if available """
- return VMHelper.lookup_blocking(session, i)
+ def lookup_image(cls, session, vdi_ref):
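+        """Determine whether the VDI is using a PV kernel"""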
+ logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
+ fn = "is_vdi_pv"
+ args = {}
+ args['vdi-ref'] = vdi_ref
+ #TODO: Call proper function in plugin
+ task = session.async_call_plugin('objectstore', fn, args)
+        pv_str = session.wait_for_task(0, task)
+        if pv_str.lower() == 'true':
+            pv = True
+        else:
+            pv = False
+        logging.debug("PV Kernel in VDI: %s", pv)
+ return pv
@classmethod
- def lookup_blocking(cls, session, i):
- """ Synchronous lookup """
+ def lookup(cls, session, i):
+        """Look the instance i up, and return it if available"""
vms = session.get_xenapi().VM.get_by_name_label(i)
n = len(vms)
if n == 0:
return None
elif n > 1:
- raise Exception('duplicate name found: %s' % i)
+ raise exception.Duplicate(_('duplicate name found: %s') % i)
else:
return vms[0]
@classmethod
def lookup_vm_vdis(cls, session, vm):
- """ Look for the VDIs that are attached to the VM """
- return VMHelper.lookup_vm_vdis_blocking(session, vm)
-
- @classmethod
- def lookup_vm_vdis_blocking(cls, session, vm):
- """ Synchronous lookup_vm_vdis """
+ """Look for the VDIs that are attached to the VM"""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbds = session.get_xenapi().VM.get_VBDs(vm)
@@ -205,8 +269,9 @@ class VMHelper():
vdi = session.get_xenapi().VBD.get_VDI(vbd)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi)
- logging.debug('VDI %s is still available', record['uuid'])
- except XenAPI.Failure, exc:
+ logging.debug(_('VDI %s is still available'),
+ record['uuid'])
+ except cls.XenAPI.Failure, exc:
logging.warn(exc)
else:
vdis.append(vdi)
@@ -217,6 +282,11 @@ class VMHelper():
@classmethod
def compile_info(cls, record):
+ """Fill record with VM status information"""
+ logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
+ record['power_state'])
+ logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
+ XENAPI_POWER_STATE[record['power_state']])
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -240,7 +310,7 @@ class VMHelper():
# Name and Value
diags[ref[0].firstChild.data] = ref[6].firstChild.data
return diags
- except XenAPI.Failure as e:
+ except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 4d897af35..ba502ffa2 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -22,12 +22,14 @@ import logging
from nova import db
from nova import context
+from nova import exception
+from nova import utils
from nova.auth.manager import AuthManager
+from nova.compute import power_state
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
-
-XenAPI = None
+from nova.virt.xenapi.vm_utils import ImageType
class VMOps(object):
@@ -36,12 +38,9 @@ class VMOps(object):
"""
def __init__(self, session):
- global XenAPI
- if XenAPI is None:
- XenAPI = __import__('XenAPI')
+ self.XenAPI = session.get_imported_xenapi()
self._session = session
- # Load XenAPI module in the helper class
- VMHelper.late_import()
+ VMHelper.XenAPI = self.XenAPI
def list_instances(self):
"""List VM instances"""
@@ -56,8 +55,8 @@ class VMOps(object):
"""Create VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
if vm is not None:
- raise Exception('Attempted to create non-unique name %s' %
- instance.name)
+ raise exception.Duplicate(_('Attempted to create'
+ ' non-unique name %s') % instance.name)
bridge = db.network_get_by_instance(context.get_admin_context(),
instance['id'])['bridge']
@@ -66,30 +65,68 @@ class VMOps(object):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- vdi_uuid = VMHelper.fetch_image(
- self._session, instance.image_id, user, project, True)
- kernel = VMHelper.fetch_image(
- self._session, instance.kernel_id, user, project, False)
- ramdisk = VMHelper.fetch_image(
- self._session, instance.ramdisk_id, user, project, False)
+ #if kernel is not present we must download a raw disk
+ if instance.kernel_id:
+ disk_image_type = ImageType.DISK
+ else:
+ disk_image_type = ImageType.DISK_RAW
+ vdi_uuid = VMHelper.fetch_image(self._session,
+ instance.image_id, user, project, disk_image_type)
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- vm_ref = VMHelper.create_vm(
- self._session, instance, kernel, ramdisk)
+ #Have a look at the VDI and see if it has a PV kernel
+ pv_kernel = False
+ if not instance.kernel_id:
+ pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
+ kernel = None
+ if instance.kernel_id:
+ kernel = VMHelper.fetch_image(self._session,
+ instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+ ramdisk = None
+ if instance.ramdisk_id:
+ ramdisk = VMHelper.fetch_image(self._session,
+ instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+ vm_ref = VMHelper.create_vm(self._session,
+ instance, kernel, ramdisk, pv_kernel)
VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
+
if network_ref:
VMHelper.create_vif(self._session, vm_ref,
network_ref, instance.mac_address)
- logging.debug('Starting VM %s...', vm_ref)
+ logging.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- logging.info('Spawning VM %s created %s.', instance.name,
+ logging.info(_('Spawning VM %s created %s.'), instance.name,
vm_ref)
+ # NOTE(armando): Do we really need to do this in virt?
+ timer = utils.LoopingCall(f=None)
+
+ def _wait_for_boot():
+ try:
+ state = self.get_info(instance['name'])['state']
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'], state)
+ if state == power_state.RUNNING:
+ logging.debug(_('Instance %s: booted'), instance['name'])
+ timer.stop()
+ except Exception, exc:
+ logging.warn(exc)
+ logging.exception(_('instance %s: failed to boot'),
+ instance['name'])
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ timer.stop()
+
+ timer.f = _wait_for_boot
+ return timer.start(interval=0.5, now=True)
+
def reboot(self, instance):
"""Reboot VM instance"""
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
- raise Exception('instance not present %s' % instance_name)
+ raise exception.NotFound(_('instance not'
+ ' found %s') % instance_name)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
self._session.wait_for_task(instance.id, task)
@@ -116,6 +153,7 @@ class VMOps(object):
self._session.wait_for_task(instance.id, task)
except XenAPI.Failure, exc:
logging.warn(exc)
+ # VM Destroy
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
@@ -135,7 +173,8 @@ class VMOps(object):
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
- raise Exception('instance not present %s' % instance_name)
+ raise exception.NotFound(_('Instance not'
+ ' found %s') % instance_name)
task = self._session.call_xenapi('Async.VM.pause', vm)
self._wait_with_callback(instance.id, task, callback)
@@ -144,15 +183,37 @@ class VMOps(object):
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
- raise Exception('instance not present %s' % instance_name)
+ raise exception.NotFound(_('Instance not'
+ ' found %s') % instance_name)
task = self._session.call_xenapi('Async.VM.unpause', vm)
self._wait_with_callback(instance.id, task, callback)
+ def suspend(self, instance, callback):
+ """suspend the specified instance"""
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
+ if vm is None:
+ raise Exception(_("suspend: instance not present %s") %
+ instance_name)
+ task = self._session.call_xenapi('Async.VM.suspend', vm)
+        self._wait_with_callback(instance.id, task, callback)
+
+ def resume(self, instance, callback):
+ """resume the specified instance"""
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
+ if vm is None:
+ raise Exception(_("resume: instance not present %s") %
+ instance_name)
+ task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
+        self._wait_with_callback(instance.id, task, callback)
+
def get_info(self, instance_id):
"""Return data about VM instance"""
- vm = VMHelper.lookup_blocking(self._session, instance_id)
+ vm = VMHelper.lookup(self._session, instance_id)
if vm is None:
- raise Exception('instance not present %s' % instance_id)
+ raise exception.NotFound(_('Instance not'
+ ' found %s') % instance_id)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_info(rec)
@@ -160,7 +221,7 @@ class VMOps(object):
"""Return data about VM diagnostics"""
vm = VMHelper.lookup(self._session, instance_id)
if vm is None:
- raise Exception("instance not present %s" % instance_id)
+ raise exception.NotFound(_("Instance not found %s") % instance_id)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_diagnostics(self._session, rec)
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
new file mode 100644
index 000000000..1ca813bcf
--- /dev/null
+++ b/nova/virt/xenapi/volume_utils.py
@@ -0,0 +1,266 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories
+"""
+
+import re
+import string
+import logging
+
+from nova import db
+from nova import context
+from nova import exception
+from nova import flags
+from nova import utils
+from nova.virt.xenapi import HelperBase
+
+FLAGS = flags.FLAGS
+
+
+class StorageError(Exception):
+ """To raise errors related to SR, VDI, PBD, and VBD commands"""
+
+ def __init__(self, message=None):
+ super(StorageError, self).__init__(message)
+
+
+class VolumeHelper(HelperBase):
+ """
+ The class that wraps the helper methods together.
+ """
+
+ @classmethod
+ def create_iscsi_storage(cls, session, info, label, description):
+ """
+ Create an iSCSI storage repository that will be used to mount
+ the volume for the specified instance
+ """
+ sr_ref = session.get_xenapi().SR.get_by_name_label(label)
+ if len(sr_ref) == 0:
+ logging.debug('Introducing %s...', label)
+ record = {}
+ if 'chapuser' in info and 'chappassword' in info:
+ record = {'target': info['targetHost'],
+ 'port': info['targetPort'],
+ 'targetIQN': info['targetIQN'],
+ 'chapuser': info['chapuser'],
+ 'chappassword': info['chappassword']}
+ else:
+ record = {'target': info['targetHost'],
+ 'port': info['targetPort'],
+ 'targetIQN': info['targetIQN']}
+ try:
+ sr_ref = session.get_xenapi().SR.create(
+ session.get_xenapi_host(),
+ record,
+ '0', label, description, 'iscsi', '', False, {})
+ logging.debug('Introduced %s as %s.', label, sr_ref)
+ return sr_ref
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to create Storage Repository'))
+ else:
+ return sr_ref[0]
+
+ @classmethod
+ def find_sr_from_vbd(cls, session, vbd_ref):
+ """Find the SR reference from the VBD reference"""
+ try:
+ vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
+ sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref)
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
+ return sr_ref
+
+ @classmethod
+ def destroy_iscsi_storage(cls, session, sr_ref):
+ """Forget the SR whilst preserving the state of the disk"""
+ logging.debug("Forgetting SR %s ... ", sr_ref)
+ pbds = []
+ try:
+ pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
+ except cls.XenAPI.Failure, exc:
+ logging.warn('Ignoring exception %s when getting PBDs for %s',
+ exc, sr_ref)
+ for pbd in pbds:
+ try:
+ session.get_xenapi().PBD.unplug(pbd)
+ except cls.XenAPI.Failure, exc:
+ logging.warn('Ignoring exception %s when unplugging PBD %s',
+ exc, pbd)
+ try:
+ session.get_xenapi().SR.forget(sr_ref)
+ logging.debug("Forgetting SR %s done.", sr_ref)
+ except cls.XenAPI.Failure, exc:
+ logging.warn('Ignoring exception %s when forgetting SR %s',
+ exc, sr_ref)
+
+ @classmethod
+ def introduce_vdi(cls, session, sr_ref):
+ """Introduce VDI in the host"""
+ try:
+ vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
+ try:
+ vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to get record'
+                                 ' of VDI %s') % vdis[0])
+ else:
+ try:
+ return session.get_xenapi().VDI.introduce(
+ vdi_rec['uuid'],
+ vdi_rec['name_label'],
+ vdi_rec['name_description'],
+ vdi_rec['SR'],
+ vdi_rec['type'],
+ vdi_rec['sharable'],
+ vdi_rec['read_only'],
+ vdi_rec['other_config'],
+ vdi_rec['location'],
+ vdi_rec['xenstore_data'],
+ vdi_rec['sm_config'])
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to introduce VDI for SR %s')
+ % sr_ref)
+
+ @classmethod
+ def parse_volume_info(cls, device_path, mountpoint):
+ """
+        Parse device_path and mountpoint into a form usable by XenAPI.
+        In particular, the mountpoint (e.g. /dev/sdc) must be translated
+        into a numeric literal.
+        FIXME(armando):
+        device_path cannot currently be used as-is, because it does not
+        contain target information. As an interim solution, target details
+        are passed either via Flags or obtained by iscsiadm. The long-term
+        solution is to add a few more fields to the iscsi_target table in
+        the db with the necessary info and to modify the iscsi driver to
+        set them.
+ """
+ device_number = VolumeHelper.mountpoint_to_number(mountpoint)
+ volume_id = _get_volume_id(device_path)
+ (iscsi_name, iscsi_portal) = _get_target(volume_id)
+ target_host = _get_target_host(iscsi_portal)
+ target_port = _get_target_port(iscsi_portal)
+ target_iqn = _get_iqn(iscsi_name, volume_id)
+        logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s,%s)',
+                      volume_id,
+                      device_number,
+                      target_host,
+                      target_port,
+                      target_iqn)
+ if (device_number < 0) or \
+ (volume_id is None) or \
+ (target_host is None) or \
+ (target_iqn is None):
+ raise StorageError(_('Unable to obtain target information %s, %s')
+ % (device_path, mountpoint))
+ volume_info = {}
+ volume_info['deviceNumber'] = device_number
+ volume_info['volumeId'] = volume_id
+ volume_info['targetHost'] = target_host
+ volume_info['targetPort'] = target_port
+ volume_info['targetIQN'] = target_iqn
+ return volume_info
+
+ @classmethod
+ def mountpoint_to_number(cls, mountpoint):
+ """Translate a mountpoint like /dev/sdc into a numeric"""
+ if mountpoint.startswith('/dev/'):
+ mountpoint = mountpoint[5:]
+ if re.match('^[hs]d[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^vd[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^[0-9]+$', mountpoint):
+ return string.atoi(mountpoint, 10)
+ else:
+ logging.warn('Mountpoint cannot be translated: %s', mountpoint)
+ return -1
+
+
+def _get_volume_id(path):
+ """Retrieve the volume id from device_path"""
+    # path must contain at least the volume_id
+ # /vol- is for remote volumes
+ # -vol- is for local volumes
+ # see compute/manager->setup_compute_volume
+ volume_id = path[path.find('/vol-') + 1:]
+ if volume_id == path:
+ volume_id = path[path.find('-vol-') + 1:].replace('--', '-')
+ return volume_id
+
+
+def _get_target_host(iscsi_string):
+ """Retrieve target host"""
+ if iscsi_string:
+ return iscsi_string[0:iscsi_string.find(':')]
+ elif iscsi_string is None or FLAGS.target_host:
+ return FLAGS.target_host
+
+
+def _get_target_port(iscsi_string):
+ """Retrieve target port"""
+ if iscsi_string:
+ return iscsi_string[iscsi_string.find(':') + 1:]
+ elif iscsi_string is None or FLAGS.target_port:
+ return FLAGS.target_port
+
+
+def _get_iqn(iscsi_string, id):
+ """Retrieve target IQN"""
+ if iscsi_string:
+ return iscsi_string
+ elif iscsi_string is None or FLAGS.iqn_prefix:
+ volume_id = _get_volume_id(id)
+ return '%s:%s' % (FLAGS.iqn_prefix, volume_id)
+
+
+def _get_target(volume_id):
+ """
+    Gets the iscsi name and portal from the volume name and host.
+    For this method to work, the following are needed:
+    1) volume_ref['host'] must resolve to something other than loopback
+    2) ietd must bind only to the address resolved above
+    If either of the two conditions is not met, fall back on Flags.
+ """
+ volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(),
+ volume_id)
+ result = (None, None)
+ try:
+ (r, _e) = utils.execute("sudo iscsiadm -m discovery -t "
+ "sendtargets -p %s" %
+ volume_ref['host'])
+ except exception.ProcessExecutionError, exc:
+ logging.warn(exc)
+ else:
+ targets = r.splitlines()
+ if len(_e) == 0 and len(targets) == 1:
+            for target in targets:
+                if volume_id in target:
+                    (location, _sep, iscsi_name) = target.partition(" ")
+                    iscsi_portal = location.split(",")[0]
+                    result = (iscsi_name, iscsi_portal)
+                    break
+ return result
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 1943ccab0..fdeb2506c 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -17,17 +17,110 @@
"""
Management class for Storage-related functions (attach, detach, etc).
"""
+import logging
+
+from nova import exception
+from nova.virt.xenapi.vm_utils import VMHelper
+from nova.virt.xenapi.volume_utils import VolumeHelper
+from nova.virt.xenapi.volume_utils import StorageError
class VolumeOps(object):
+ """
+ Management class for Volume-related tasks
+ """
def __init__(self, session):
+ self.XenAPI = session.get_imported_xenapi()
self._session = session
+ # Load XenAPI module in the helper classes respectively
+ VolumeHelper.XenAPI = self.XenAPI
+ VMHelper.XenAPI = self.XenAPI
def attach_volume(self, instance_name, device_path, mountpoint):
- # FIXME: that's going to be sorted when iscsi-xenapi lands in branch
- return True
+ """Attach volume storage to VM instance"""
+ # Before we start, check that the VM exists
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
+ raise exception.NotFound(_('Instance %s not found')
+ % instance_name)
+ # NOTE: No Resource Pool concept so far
+ logging.debug(_("Attach_volume: %s, %s, %s"),
+ instance_name, device_path, mountpoint)
+        # Create the iSCSI SR, and the PBD through which hosts access SRs.
+ # But first, retrieve target info, like Host, IQN, LUN and SCSIID
+ vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
+ label = 'SR-%s' % vol_rec['volumeId']
+ description = 'Disk-for:%s' % instance_name
+ # Create SR
+ sr_ref = VolumeHelper.create_iscsi_storage(self._session,
+ vol_rec,
+ label,
+ description)
+ # Introduce VDI and attach VBD to VM
+ try:
+ vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
+ except StorageError, exc:
+ logging.warn(exc)
+ VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+ raise Exception(_('Unable to create VDI on SR %s for instance %s')
+ % (sr_ref,
+ instance_name))
+ else:
+ try:
+ vbd_ref = VMHelper.create_vbd(self._session,
+ vm_ref, vdi_ref,
+ vol_rec['deviceNumber'],
+ False)
+ except self.XenAPI.Failure, exc:
+ logging.warn(exc)
+ VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+ raise Exception(_('Unable to use SR %s for instance %s')
+ % (sr_ref,
+ instance_name))
+ else:
+ try:
+ task = self._session.call_xenapi('Async.VBD.plug',
+ vbd_ref)
+ self._session.wait_for_task(vol_rec['deviceNumber'], task)
+ except self.XenAPI.Failure, exc:
+ logging.warn(exc)
+ VolumeHelper.destroy_iscsi_storage(self._session,
+ sr_ref)
+ raise Exception(_('Unable to attach volume to instance %s')
+ % instance_name)
+ logging.info(_('Mountpoint %s attached to instance %s'),
+ mountpoint, instance_name)
def detach_volume(self, instance_name, mountpoint):
- # FIXME: that's going to be sorted when iscsi-xenapi lands in branch
- return True
+        """Detach volume storage from VM instance"""
+ # Before we start, check that the VM exists
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
+ raise exception.NotFound(_('Instance %s not found')
+ % instance_name)
+ # Detach VBD from VM
+ logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ device_number = VolumeHelper.mountpoint_to_number(mountpoint)
+ try:
+ vbd_ref = VMHelper.find_vbd_by_number(self._session,
+ vm_ref, device_number)
+ except StorageError, exc:
+ logging.warn(exc)
+ raise Exception(_('Unable to locate volume %s') % mountpoint)
+ else:
+ try:
+ sr_ref = VolumeHelper.find_sr_from_vbd(self._session,
+ vbd_ref)
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ except StorageError, exc:
+ logging.warn(exc)
+ raise Exception(_('Unable to detach volume %s') % mountpoint)
+ try:
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+ except StorageError, exc:
+ logging.warn(exc)
+ # Forget SR
+ VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+ logging.info(_('Mountpoint %s detached from instance %s'),
+ mountpoint, instance_name)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 146e2f153..7f03d6c2b 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -44,7 +44,10 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
:xenapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks (Async.VM.start, etc)
(default: 0.5).
-
+:target_host: the iSCSI Target Host IP address, i.e. the IP
+ address for the nova-volume host
+:target_port: iSCSI Target Port (default: 3260)
+:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
"""
import logging
@@ -62,6 +65,7 @@ from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
FLAGS = flags.FLAGS
+
flags.DEFINE_string('xenapi_connection_url',
None,
'URL for connection to XenServer/Xen Cloud Platform.'
@@ -79,18 +83,20 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
-
-XenAPI = None
+flags.DEFINE_string('target_host',
+ None,
+ 'iSCSI Target Host')
+flags.DEFINE_string('target_port',
+ '3260',
+ 'iSCSI Target Port, 3260 Default')
+flags.DEFINE_string('iqn_prefix',
+ 'iqn.2010-10.org.openstack',
+ 'IQN Prefix')
def get_connection(_):
"""Note that XenAPI doesn't have a read-only connection mode, so
the read_only parameter is ignored."""
- # This is loaded late so that there's no need to install this
- # library when not using XenAPI.
- global XenAPI
- if XenAPI is None:
- XenAPI = __import__('XenAPI')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
@@ -110,6 +116,13 @@ class XenAPIConnection(object):
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
+ def init_host(self):
+ #FIXME(armando): implement this
+ #NOTE(armando): would we need a method
+ #to call when shutting down the host?
+ #e.g. to do session logout?
+ pass
+
def list_instances(self):
"""List VM instances"""
return self._vmops.list_instances()
@@ -134,6 +147,14 @@ class XenAPIConnection(object):
"""Unpause paused VM instance"""
self._vmops.unpause(instance, callback)
+ def suspend(self, instance, callback):
+ """suspend the specified instance"""
+ self._vmops.suspend(instance, callback)
+
+ def resume(self, instance, callback):
+ """resume the specified instance"""
+ self._vmops.resume(instance, callback)
+
def get_info(self, instance_id):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
@@ -161,9 +182,14 @@ class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
def __init__(self, url, user, pw):
- self._session = XenAPI.Session(url)
+ self.XenAPI = self.get_imported_xenapi()
+ self._session = self._create_session(url)
self._session.login_with_password(user, pw)
+ def get_imported_xenapi(self):
+ """Stubout point. This can be replaced with a mock xenapi module."""
+ return __import__('XenAPI')
+
def get_xenapi(self):
"""Return the xenapi object"""
return self._session.xenapi
@@ -181,29 +207,33 @@ class XenAPISession(object):
def async_call_plugin(self, plugin, fn, args):
"""Call Async.host.call_plugin on a background thread."""
- return tpool.execute(_unwrap_plugin_exceptions,
+ return tpool.execute(self._unwrap_plugin_exceptions,
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
- def wait_for_task(self, instance_id, task):
- """Return a Deferred that will give the result of the given task.
- The task is polled until it completes."""
+ def wait_for_task(self, id, task):
+ """Return the result of the given task. The task is polled
+ until it completes."""
done = event.Event()
- loop = utils.LoopingCall(self._poll_task, instance_id, task, done)
+ loop = utils.LoopingCall(self._poll_task, id, task, done)
loop.start(FLAGS.xenapi_task_poll_interval, now=True)
rv = done.wait()
loop.stop()
return rv
- def _poll_task(self, instance_id, task, done):
+ def _create_session(self, url):
+ """Stubout point. This can be replaced with a mock session."""
+ return self.XenAPI.Session(url)
+
+ def _poll_task(self, id, task, done):
"""Poll the given XenAPI task, and fire the given Deferred if we
get a result."""
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
action = dict(
- instance_id=int(instance_id),
+ id=int(id),
action=name,
error=None)
if status == "pending":
@@ -223,33 +253,32 @@ class XenAPISession(object):
task,
status,
error_info))
- done.send_exception(XenAPI.Failure(error_info))
+ done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
- except XenAPI.Failure, exc:
+ except self.XenAPI.Failure, exc:
logging.warn(exc)
done.send_exception(*sys.exc_info())
-
-def _unwrap_plugin_exceptions(func, *args, **kwargs):
- """Parse exception details"""
- try:
- return func(*args, **kwargs)
- except XenAPI.Failure, exc:
- logging.debug(_("Got exception: %s"), exc)
- if (len(exc.details) == 4 and
- exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
- exc.details[2] == 'Failure'):
- params = None
- try:
- params = eval(exc.details[3])
- except:
- raise exc
- raise XenAPI.Failure(params)
- else:
+ def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
+ """Parse exception details"""
+ try:
+ return func(*args, **kwargs)
+ except self.XenAPI.Failure, exc:
+ logging.debug(_("Got exception: %s"), exc)
+ if (len(exc.details) == 4 and
+ exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
+ exc.details[2] == 'Failure'):
+ params = None
+ try:
+ params = eval(exc.details[3])
+ except:
+ raise exc
+ raise self.XenAPI.Failure(params)
+ else:
+ raise
+ except xmlrpclib.ProtocolError, exc:
+ logging.debug(_("Got exception: %s"), exc)
raise
- except xmlrpclib.ProtocolError, exc:
- logging.debug(_("Got exception: %s"), exc)
- raise
def _parse_xmlrpc_value(val):
diff --git a/nova/wsgi.py b/nova/wsgi.py
index c7ee9ed14..b5d6b96c1 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -270,7 +270,7 @@ class Serializer(object):
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
- req = webob.Request(environ)
+ req = webob.Request.blank('', environ)
suffix = req.path_info.split('.')[-1].lower()
if suffix == 'json':
self.handler = self._to_json
diff --git a/plugins/xenserver/doc/networking.rst b/plugins/xenserver/doc/networking.rst
new file mode 100644
index 000000000..67f2d9af3
--- /dev/null
+++ b/plugins/xenserver/doc/networking.rst
@@ -0,0 +1,144 @@
+Multi Tenancy Networking Protections in XenServer
+=================================================
+
+The purpose of the vif_rules script is to allow multi-tenancy on a XenServer
+host. In a multi-tenant cloud environment a host machine needs to be able to
+enforce network isolation amongst guest instances, at both layer two and layer
+three. The rules prevent guests from taking and using unauthorized IP addresses,
+sniffing other guests' traffic, and carrying out ARP poisoning attacks. The
+current revision supports only IPv4, but will support IPv6 in the future.
+
+Kernel Requirements
+===================
+
+- physdev module
+- arptables support
+- ebtables support
+- iptables support
+
+If the kernel doesn't support these, you will need to obtain the Source RPMS for
+the proper version of XenServer to recompile the dom0 kernel.
+
+XenServer Requirements (32-bit dom0)
+====================================
+
+- arptables 32-bit rpm
+- ebtables 32-bit rpm
+- python-simplejson
+
+XenServer Environment Specific Notes
+====================================
+
+- XenServer 5.5 U1 based on the 2.6.18 kernel didn't include physdev module
+ support. Support for this had to be recompiled into the kernel.
+- XenServer 5.6 based on the 2.6.27 kernel didn't include physdev, ebtables, or
+ arptables.
+- XenServer 5.6 FP1 didn't include physdev, ebtables, or arptables but they do
+ have a Cloud Supplemental pack available to partners which swaps out the
+ kernels for kernels that support the networking rules.
+
+How it works - tl;dr
+====================
+
+iptables, ebtables, and arptables drop rules are applied to all forward chains
+on the host. These are applied at boot time with an init script. They ensure
+all forwarded packets are dropped by default. Allow rules are then applied to
+the instances to ensure they have permission to talk on the internet.
+
+How it works - Long
+===================
+
+Any time an unprivileged domain (domU) is started or stopped, it gets a unique
+domain id (dom_id). This dom_id is used in a number of places, one of which is
+the name of the virtual interface (vif) assigned to the domain. The vifs are
+attached to the bridge that is attached to the physical network. For instance,
+if you had a public bridge attached to eth0 and your domain id was 5, your vif
+would be vif5.0.
+
+The networking rules are applied to the VIF directly so they apply at the lowest
+level of the networking stack. Because the VIF changes along with the domain id
+on any start, stop, or reboot of the instance, the rules need to be removed and
+re-added any time that occurs.
+
+Because the dom_id can change often, the vif_rules script is hooked into the
+/etc/xensource/scripts/vif script that gets called any time an instance is
+started or stopped, which includes pauses and resumes.
+
+Examples of the rules run for the host on boot:
+
+iptables -P FORWARD DROP
+iptables -A FORWARD -m physdev --physdev-in eth0 -j ACCEPT
+ebtables -P FORWARD DROP
+ebtables -A FORWARD -o eth0 -j ACCEPT
+arptables -P FORWARD DROP
+arptables -A FORWARD --opcode Request --in-interface eth0 -j ACCEPT
+arptables -A FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
+
+Examples of the rules that are run per instance state change:
+
+iptables -A FORWARD -m physdev --physdev-in vif1.0 -s 10.1.135.22/32 -j ACCEPT
+arptables -A FORWARD --opcode Request --in-interface "vif1.0" \
+ --source-ip 10.1.135.22 -j ACCEPT
+arptables -A FORWARD --opcode Reply --in-interface "vif1.0" \
+ --source-ip 10.1.135.22 --source-mac 9e:6e:cc:19:7f:fe -j ACCEPT
+ebtables -A FORWARD -p 0806 -o vif1.0 --arp-ip-dst 10.1.135.22 -j ACCEPT
+ebtables -A FORWARD -p 0800 -o vif1.0 --ip-dst 10.1.135.22 -j ACCEPT
+ebtables -I FORWARD 1 -s ! 9e:6e:cc:19:7f:fe -i vif1.0 -j DROP
+
+Typically when you see a vif, it'll look like vif<domain id>.<network bridge>.
+vif2.1 for example would be domain 2 on the second interface.
+
+The vif_rules.py script needs to pull information about the IPs and MAC
+addresses assigned to the instance. The current implementation assumes that the
+information is stored in the VM record, under the xenstore-data key, as a JSON
+string. The vif_rules.py script parses that JSON string to determine the IPs
+and MAC addresses to protect.
+
+An example format is given below:
+
+# xe vm-param-get uuid=<uuid> param-name=xenstore-data
+xenstore-data (MRW):
+vm-data/networking/4040fa7292e4:
+{"label": "public",
+ "ips": [{"netmask":"255.255.255.0",
+ "enabled":"1",
+ "ip":"173.200.100.10"}],
+ "mac":"40:40:fa:72:92:e4",
+ "gateway":"173.200.100.1",
+ "vm_id":"123456",
+ "dns":["72.3.128.240","72.3.128.241"]};
+
+vm-data/networking/40402321c9b8:
+{"label":"private",
+ "ips":[{"netmask":"255.255.224.0",
+ "enabled":"1",
+ "ip":"10.177.10.10"}],
+ "routes":[{"route":"10.176.0.0",
+ "netmask":"255.248.0.0",
+ "gateway":"10.177.10.1"},
+ {"route":"10.191.192.0",
+ "netmask":"255.255.192.0",
+ "gateway":"10.177.10.1"}],
+ "mac":"40:40:23:21:c9:b8"}
+
+The key is used for two purposes. First, the vif_rules.py script parses the
+JSON and reads from it to apply the rules needed. Second, because it's put into
+the xenstore-data field, the xenstore will be populated with this data on boot.
+This allows a guest agent to read out data about the instance and apply
+configurations as needed.
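+
+For illustration only, a rough sketch of how a dom0-side script (in the spirit
+of vif_rules.py above) might consume this data; the dom_id and MAC key below
+are hypothetical, and xenstore-read plus simplejson are assumed to be present:
+
+import subprocess
+import simplejson as json
+
+dom_id = "5"            # hypothetical domain id
+mac = "4040fa7292e4"    # hypothetical key, matching the example listing above
+path = "/local/domain/%s/vm-data/networking/%s" % (dom_id, mac)
+proc = subprocess.Popen("/usr/bin/xenstore-read %s" % path, shell=True,
+                        stdout=subprocess.PIPE)
+data = json.loads(proc.stdout.read())
+# data["label"] selects the bridge (public -> vifN.0, private -> vifN.1);
+# data["mac"] and each entry in data["ips"] feed the allow rules shown above.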
+
+Installation
+============
+
+- Copy host-rules into /etc/init.d/ and make sure to chmod +x host-rules.
+- Run 'chkconfig host-rules on' to add the init script to start up.
+- Copy vif_rules.py into /etc/xensource/scripts
+- Patch /etc/xensource/scripts/vif using the supplied patch file. It may vary
+  for different versions of XenServer but it should be pretty self-explanatory.
+  It calls the vif_rules.py script on domain creation and teardown.
+- Run '/etc/init.d/host-rules start' to start up the host based rules.
+- The instance rules will then fire on creation of the VM as long as the correct
+ JSON is in place.
+- You can check to see if the rules are in place with: iptables --list,
+ arptables --list, or ebtables --list
+
diff --git a/plugins/xenserver/networking/etc/init.d/host-rules b/plugins/xenserver/networking/etc/init.d/host-rules
new file mode 100755
index 000000000..798da9552
--- /dev/null
+++ b/plugins/xenserver/networking/etc/init.d/host-rules
@@ -0,0 +1,106 @@
+#!/bin/bash
+#
+# host-rules Start/Stop the networking host rules
+#
+# chkconfig: 2345 85 15
+# description: Networking Host Rules for Multi Tenancy Protections
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+IPTABLES=/sbin/iptables
+EBTABLES=/sbin/ebtables
+ARPTABLES=/sbin/arptables
+
+iptables-up()
+{
+ $IPTABLES -P FORWARD DROP
+ $IPTABLES -A FORWARD -m physdev --physdev-in eth0 -j ACCEPT
+ $IPTABLES -A FORWARD -m physdev --physdev-in eth1 -j ACCEPT
+}
+
+ebtables-up()
+{
+ $EBTABLES -P FORWARD DROP
+ $EBTABLES -A FORWARD -o eth0 -j ACCEPT
+ $EBTABLES -A FORWARD -o eth1 -j ACCEPT
+}
+
+arptables-up()
+{
+ $ARPTABLES -P FORWARD DROP
+ $ARPTABLES -A FORWARD --opcode Request --in-interface eth0 -j ACCEPT
+ $ARPTABLES -A FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
+ $ARPTABLES -A FORWARD --opcode Request --in-interface eth1 -j ACCEPT
+ $ARPTABLES -A FORWARD --opcode Reply --in-interface eth1 -j ACCEPT
+}
+
+iptables-down()
+{
+ $IPTABLES -P FORWARD ACCEPT
+ $IPTABLES -D FORWARD -m physdev --physdev-in eth0 -j ACCEPT
+ $IPTABLES -D FORWARD -m physdev --physdev-in eth1 -j ACCEPT
+}
+
+ebtables-down()
+{
+ $EBTABLES -P FORWARD ACCEPT
+ $EBTABLES -D FORWARD -o eth0 -j ACCEPT
+ $EBTABLES -D FORWARD -o eth1 -j ACCEPT
+}
+
+arptables-down()
+{
+ $ARPTABLES -P FORWARD ACCEPT
+ $ARPTABLES -D FORWARD --opcode Request --in-interface eth0 -j ACCEPT
+ $ARPTABLES -D FORWARD --opcode Reply --in-interface eth0 -j ACCEPT
+ $ARPTABLES -D FORWARD --opcode Request --in-interface eth1 -j ACCEPT
+ $ARPTABLES -D FORWARD --opcode Reply --in-interface eth1 -j ACCEPT
+}
+
+start()
+{
+ iptables-up
+ ebtables-up
+ arptables-up
+}
+
+stop()
+{
+ iptables-down
+ ebtables-down
+ arptables-down
+}
+
+case "$1" in
+ start)
+ start
+ RETVAL=$?
+ ;;
+ stop)
+ stop
+ RETVAL=$?
+ ;;
+ restart)
+ stop
+ start
+ RETVAL=$?
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|restart}"
+ exit 1
+ ;;
+esac
+exit $RETVAL
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
new file mode 100644
index 000000000..feaf1312d
--- /dev/null
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
@@ -0,0 +1,22 @@
+--- vif 2010-12-20 16:39:46.000000000 +0000
++++ vif_modified 2010-11-19 23:24:37.000000000 +0000
+@@ -213,6 +213,7 @@
+
+ # xs-xen.pq.hq:91e986b8e49f netback-wait-for-hotplug
+ xenstore-write "/local/domain/0/backend/vif/${DOMID}/${DEVID}/hotplug-status" "connected"
++ python /etc/xensource/scripts/vif_rules.py ${DOMID} online 2>&1 > /dev/null
+ fi
+ ;;
+
+@@ -224,9 +225,11 @@
+
+ remove)
+ if [ "${TYPE}" = "vif" ] ;then
++ python /etc/xensource/scripts/vif_rules.py ${DOMID} offline 2>&1 > /dev/null
+ xenstore-rm "${HOTPLUG}/hotplug"
+ fi
+ logger -t scripts-vif "${dev} has been removed"
+ remove_from_bridge
+ ;;
+ esac
++
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
new file mode 100755
index 000000000..d60816ce7
--- /dev/null
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+This script is used to configure iptables, ebtables, and arptables rules on
+XenServer hosts.
+"""
+
+import os
+import subprocess
+import sys
+
+# This is written to Python 2.4, since that is what is available on XenServer
+import simplejson as json
+
+
+def main(dom_id, command, only_this_vif=None):
+ xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \
+ % dom_id, True)
+ macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
+
+ for mac in macs:
+ xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s"
+ xsread = execute(xsr % (dom_id, mac), True)
+ data = json.loads(xsread)
+ for ip in data['ips']:
+ if data["label"] == "public":
+ vif = "vif%s.0" % dom_id
+ else:
+ vif = "vif%s.1" % dom_id
+
+ if (only_this_vif is None) or (vif == only_this_vif):
+ params = dict(IP=ip['ip'], VIF=vif, MAC=data['mac'])
+ apply_ebtables_rules(command, params)
+ apply_arptables_rules(command, params)
+ apply_iptables_rules(command, params)
+
+
+def execute(command, return_stdout=False):
+ devnull = open(os.devnull, 'w')
+ proc = subprocess.Popen(command, shell=True, close_fds=True,
+ stdout=subprocess.PIPE, stderr=devnull)
+ devnull.close()
+ if return_stdout:
+ return proc.stdout.read()
+ else:
+ return None
+
+# A note about adding rules:
+# Whenever we add any rule to iptables, arptables or ebtables we first
+# delete the same rule to ensure the rule only exists once.
+
+
+def apply_iptables_rules(command, params):
+ iptables = lambda rule: execute("/sbin/iptables %s" % rule)
+
+ iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
+ -j ACCEPT" % params)
+ if command == 'online':
+ iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
+ -j ACCEPT" % params)
+
+
+def apply_arptables_rules(command, params):
+ arptables = lambda rule: execute("/sbin/arptables %s" % rule)
+
+ arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \
+ --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+ arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \
+ --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+ if command == 'online':
+ arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \
+ --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+ arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \
+ --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+
+
+def apply_ebtables_rules(command, params):
+ ebtables = lambda rule: execute("/sbin/ebtables %s" % rule)
+
+ ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" %
+ params)
+ ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" %
+ params)
+ if command == 'online':
+ ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \
+ -j ACCEPT" % params)
+ ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \
+ -j ACCEPT" % params)
+
+ ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+ if command == 'online':
+ ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 3:
+ print "usage: %s dom_id online|offline [vif]" % \
+ os.path.basename(sys.argv[0])
+ sys.exit(1)
+ else:
+ dom_id, command = sys.argv[1:3]
+ vif = len(sys.argv) == 4 and sys.argv[3] or None
+ main(dom_id, command, vif)
diff --git a/plugins/xenapi/README b/plugins/xenserver/xenapi/README
index fbd471035..fbd471035 100644
--- a/plugins/xenapi/README
+++ b/plugins/xenserver/xenapi/README
diff --git a/plugins/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore
index 271e7337f..8ee2f748d 100644
--- a/plugins/xenapi/etc/xapi.d/plugins/objectstore
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore
@@ -43,24 +43,43 @@ SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
-
+def is_vdi_pv(session, args):
+    logging.debug("Checking whether VDI has PV kernel")
+    vdi = exists(args, 'vdi-ref')
+    pv = with_vdi_in_dom0(session, vdi, False,
+                          lambda dev: _is_vdi_pv('/dev/%s' % dev))
+    if pv:
+        return 'true'
+    else:
+        return 'false'
+
+def _is_vdi_pv(dest):
+    logging.debug("Running pygrub against %s", dest)
+    output = os.popen('pygrub -qn %s' % dest)
+    pv = False
+    for line in output.readlines():
+        # Try to find the kernel string reported by pygrub
+        m = re.search('(?<=kernel:)/.*(?:>)', line)
+        if m:
+            if m.group(0).find('xen') != -1:
+                pv = True
+    logging.debug("PV: %s", pv)
+    return pv
+
def get_vdi(session, args):
src_url = exists(args, 'src_url')
username = exists(args, 'username')
password = exists(args, 'password')
+    raw_image = validate_bool(args, 'raw', 'false')
add_partition = validate_bool(args, 'add_partition', 'false')
-
(proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
-
sr = find_sr(session)
if sr is None:
raise Exception('Cannot find SR to write VDI to')
-
virtual_size = \
get_content_length(proto, netloc, url_path, username, password)
if virtual_size < 0:
raise Exception('Cannot get VDI size')
-
vdi_size = virtual_size
if add_partition:
# Make room for MBR.
@@ -69,18 +88,19 @@ def get_vdi(session, args):
vdi = create_vdi(session, sr, src_url, vdi_size, False)
with_vdi_in_dom0(session, vdi, False,
lambda dev: get_vdi_(proto, netloc, url_path,
- username, password, add_partition,
+ username, password, add_partition,raw_image,
virtual_size, '/dev/%s' % dev))
return session.xenapi.VDI.get_uuid(vdi)
-def get_vdi_(proto, netloc, url_path, username, password, add_partition,
+def get_vdi_(proto, netloc, url_path, username, password, add_partition, raw_image,
virtual_size, dest):
- if add_partition:
+    # Salvatore: the VDI should not be partitioned for raw images
+ if (add_partition and not raw_image):
write_partition(virtual_size, dest)
- offset = add_partition and MBR_SIZE_BYTES or 0
+ offset = (add_partition and not raw_image and MBR_SIZE_BYTES) or 0
get(proto, netloc, url_path, username, password, dest, offset)
@@ -228,4 +248,5 @@ def download_all(response, length, dest_file, offset):
if __name__ == '__main__':
XenAPIPlugin.dispatch({'get_vdi': get_vdi,
- 'get_kernel': get_kernel})
+ 'get_kernel': get_kernel,
+ 'is_vdi_pv': is_vdi_pv})
diff --git a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
index 2d323a016..2d323a016 100755
--- a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
diff --git a/run_tests.py b/run_tests.py
deleted file mode 100644
index 312ed7ef3..000000000
--- a/run_tests.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-This is our basic test running framework based on Twisted's Trial.
-
-Usage Examples:
-
- # to run all the tests
- python run_tests.py
-
- # to run a specific test suite imported here
- python run_tests.py NodeConnectionTestCase
-
- # to run a specific test imported here
- python run_tests.py NodeConnectionTestCase.test_reboot
-
- # to run some test suites elsewhere
- python run_tests.py nova.tests.node_unittest
- python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase
-
-Due to our use of multiprocessing it we frequently get some ignorable
-'Interrupted system call' exceptions after test completion.
-
-"""
-
-import eventlet
-eventlet.monkey_patch()
-
-import __main__
-import gettext
-import os
-import sys
-
-gettext.install('nova', unicode=1)
-
-from twisted.scripts import trial as trial_script
-
-from nova import flags
-from nova import twistd
-
-from nova.tests.access_unittest import *
-from nova.tests.api_unittest import *
-from nova.tests.auth_unittest import *
-from nova.tests.cloud_unittest import *
-from nova.tests.compute_unittest import *
-from nova.tests.flags_unittest import *
-from nova.tests.middleware_unittest import *
-from nova.tests.misc_unittest import *
-from nova.tests.network_unittest import *
-#from nova.tests.objectstore_unittest import *
-from nova.tests.quota_unittest import *
-from nova.tests.rpc_unittest import *
-from nova.tests.scheduler_unittest import *
-from nova.tests.service_unittest import *
-from nova.tests.twistd_unittest import *
-from nova.tests.virt_unittest import *
-from nova.tests.volume_unittest import *
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_bool('flush_db', True,
- 'Flush the database before running fake tests')
-flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
- 'Path to where to pipe STDERR during test runs.'
- ' Default = "run_tests.err.log"')
-
-
-if __name__ == '__main__':
- OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
- config = OptionsClass()
- argv = config.parseOptions()
-
- FLAGS.verbose = True
-
- # TODO(termie): these should make a call instead of doing work on import
- if FLAGS.fake_tests:
- from nova.tests.fake_flags import *
- else:
- from nova.tests.real_flags import *
-
- # Establish redirect for STDERR
- sys.stderr.flush()
- err = open(FLAGS.tests_stderr, 'w+', 0)
- os.dup2(err.fileno(), sys.stderr.fileno())
-
- if len(argv) == 1 and len(config['tests']) == 0:
- # If no tests were specified run the ones imported in this file
- # NOTE(termie): "tests" is not a flag, just some Trial related stuff
- config['tests'].update(['__main__'])
- elif len(config['tests']):
- # If we specified tests check first whether they are in __main__
- for arg in config['tests']:
- key = arg.split('.')[0]
- if hasattr(__main__, key):
- config['tests'].remove(arg)
- config['tests'].add('__main__.%s' % arg)
-
- trial_script._initialDebugSetup(config)
- trialRunner = trial_script._makeRunner(config)
- suite = trial_script._getSuite(config)
- if config['until-failure']:
- test_result = trialRunner.runUntilFailure(suite)
- else:
- test_result = trialRunner.run(suite)
- if config.tracer:
- sys.settrace(None)
- results = config.tracer.results()
- results.write_results(show_missing=1, summary=False,
- coverdir=config.coverdir)
- sys.exit(not test_result.wasSuccessful())
diff --git a/run_tests.sh b/run_tests.sh
index a11dcd7cc..ffb0b6295 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -21,6 +21,7 @@ function process_option {
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
-f|--force) let force=1;;
+ *) noseargs="$noseargs $1"
esac
}
@@ -29,14 +30,18 @@ with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
+noseargs=
for arg in "$@"; do
process_option $arg
done
+NOSETESTS="nosetests -v $noseargs"
+
if [ $never_venv -eq 1 ]; then
# Just run the test suites in current environment
- python run_tests.py
+ rm -f nova.sqlite
+ $NOSETESTS
exit
fi
@@ -47,7 +52,8 @@ if [ $force -eq 1 ]; then
fi
if [ -e ${venv} ]; then
- ${with_venv} python run_tests.py $@
+ ${with_venv} rm -f nova.sqlite
+ ${with_venv} $NOSETESTS
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
@@ -59,9 +65,11 @@ else
# Install the virtualenv and run the test suite in it
python tools/install_venv.py
else
- python run_tests.py
+ rm -f nova.sqlite
+ $NOSETESTS
exit
fi
fi
- ${with_venv} python run_tests.py $@
+ ${with_venv} rm -f nova.sqlite
+ ${with_venv} $NOSETESTS
fi
diff --git a/setup.py b/setup.py
index d88bc1e6f..1abf4d9fe 100644
--- a/setup.py
+++ b/setup.py
@@ -58,6 +58,7 @@ setup(name='nova',
'build_sphinx' : local_BuildDoc },
packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
+ test_suite='nose.collector',
scripts=['bin/nova-api',
'bin/nova-compute',
'bin/nova-dhcpbridge',
diff --git a/tools/clean-vlans b/tools/clean-vlans
index f5b0295ad..820a9dbe5 100755
--- a/tools/clean-vlans
+++ b/tools/clean-vlans
@@ -17,6 +17,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+export LC_ALL=C
+
sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down
sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo brctl delbr foo
sudo ifconfig -a | grep vlan | grep -v vlan124 | grep -v vlan5 | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down
diff --git a/tools/setup_iptables.sh b/tools/setup_iptables.sh
index 673353eb4..8be8cd812 100755
--- a/tools/setup_iptables.sh
+++ b/tools/setup_iptables.sh
@@ -36,7 +36,7 @@ else
# NOTE(vish): This will just get the first ip in the list, so if you
# have more than one eth device set up, this will fail, and
# you should explicitly pass in the ip of the instance
- IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
+ IP=`LC_ALL=C ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
fi
if [ -n "$3" ]; then