Diffstat (limited to 'nova/tests')
-rw-r--r--  nova/tests/api/openstack/common.py         |   1
-rw-r--r--  nova/tests/api/openstack/fakes.py          | 114
-rw-r--r--  nova/tests/api/openstack/test_accounts.py  | 125
-rw-r--r--  nova/tests/api/openstack/test_adminapi.py  |   2
-rw-r--r--  nova/tests/api/openstack/test_auth.py      |  54
-rw-r--r--  nova/tests/api/openstack/test_common.py    |  20
-rw-r--r--  nova/tests/api/openstack/test_flavors.py   |   2
-rw-r--r--  nova/tests/api/openstack/test_images.py    |  17
-rw-r--r--  nova/tests/api/openstack/test_servers.py   | 579
-rw-r--r--  nova/tests/api/openstack/test_users.py     | 141
-rw-r--r--  nova/tests/api/openstack/test_zones.py     |  27
-rw-r--r--  nova/tests/api/test_wsgi.py                | 201
-rw-r--r--  nova/tests/db/fakes.py                     |   3
-rw-r--r--  nova/tests/fake_flags.py                   |   1
-rw-r--r--  nova/tests/integrated/__init__.py          |  20
-rw-r--r--  nova/tests/integrated/api/__init__.py      |  20
-rw-r--r--  nova/tests/integrated/api/client.py        | 212
-rw-r--r--  nova/tests/test_cloud.py                   |  30
-rw-r--r--  nova/tests/test_compute.py                 | 283
-rw-r--r--  nova/tests/test_console.py                 |   2
-rw-r--r--  nova/tests/test_direct.py                  |   6
-rw-r--r--  nova/tests/test_misc.py                    |  60
-rw-r--r--  nova/tests/test_network.py                 | 184
-rw-r--r--  nova/tests/test_quota.py                   | 102
-rw-r--r--  nova/tests/test_scheduler.py               | 627
-rw-r--r--  nova/tests/test_service.py                 |  42
-rw-r--r--  nova/tests/test_virt.py                    | 356
-rw-r--r--  nova/tests/test_volume.py                  | 197
-rw-r--r--  nova/tests/test_xenapi.py                  | 103
29 files changed, 3320 insertions, 211 deletions
diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py
index 3f9c7d3cf..74bb8729a 100644
--- a/nova/tests/api/openstack/common.py
+++ b/nova/tests/api/openstack/common.py
@@ -28,6 +28,7 @@ def webob_factory(url):
def web_request(url, method=None, body=None):
req = webob.Request.blank("%s%s" % (base_url, url))
if method:
+ req.content_type = "application/json"
req.method = method
if body:
req.body = json.dumps(body)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 49ce8c1b5..0bbb1c890 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -25,8 +25,8 @@ import webob.dec
from paste import urlmap
from glance import client as glance_client
+from glance.common import exception as glance_exc
-from nova import auth
from nova import context
from nova import exception as exc
from nova import flags
@@ -35,6 +35,7 @@ import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import ratelimiting
+from nova.auth.manager import User, Project
from nova.image import glance
from nova.image import local
from nova.image import service
@@ -68,8 +69,6 @@ def fake_auth_init(self, application):
@webob.dec.wsgify
def fake_wsgi(self, req):
req.environ['nova.context'] = context.RequestContext(1, 1)
- if req.body:
- req.environ['inst_dict'] = json.loads(req.body)
return self.application
@@ -84,10 +83,17 @@ def wsgi_app(inner_application=None):
return mapper
-def stub_out_key_pair_funcs(stubs):
+def stub_out_key_pair_funcs(stubs, have_key_pair=True):
def key_pair(context, user_id):
return [dict(name='key', public_key='public_key')]
- stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
+
+ def no_key_pair(context, user_id):
+ return []
+
+ if have_key_pair:
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
+ else:
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_image_service(stubs):
@@ -149,25 +155,26 @@ def stub_out_glance(stubs, initial_fixtures=None):
for f in self.fixtures:
if f['id'] == image_id:
return f
- return None
+ raise glance_exc.NotFound
- def fake_add_image(self, image_meta):
+ def fake_add_image(self, image_meta, data=None):
id = ''.join(random.choice(string.letters) for _ in range(20))
image_meta['id'] = id
self.fixtures.append(image_meta)
- return id
+ return image_meta
- def fake_update_image(self, image_id, image_meta):
+ def fake_update_image(self, image_id, image_meta, data=None):
f = self.fake_get_image_meta(image_id)
if not f:
- raise exc.NotFound
+ raise glance_exc.NotFound
f.update(image_meta)
+ return f
def fake_delete_image(self, image_id):
f = self.fake_get_image_meta(image_id)
if not f:
- raise exc.NotFound
+ raise glance_exc.NotFound
self.fixtures.remove(f)
@@ -227,21 +234,102 @@ class FakeAuthDatabase(object):
class FakeAuthManager(object):
auth_data = {}
+ projects = {}
+
+ @classmethod
+ def clear_fakes(cls):
+ cls.auth_data = {}
+ cls.projects = {}
+
+ @classmethod
+ def reset_fake_data(cls):
+ cls.auth_data = dict(acc1=User('guy1', 'guy1', 'acc1',
+ 'fortytwo!', False))
+ cls.projects = dict(testacct=Project('testacct',
+ 'testacct',
+ 'guy1',
+ 'test',
+ []))
def add_user(self, key, user):
FakeAuthManager.auth_data[key] = user
+ def get_users(self):
+ return FakeAuthManager.auth_data.values()
+
def get_user(self, uid):
for k, v in FakeAuthManager.auth_data.iteritems():
if v.id == uid:
return v
return None
- def get_project(self, pid):
+ def delete_user(self, uid):
+ for k, v in FakeAuthManager.auth_data.items():
+ if v.id == uid:
+ del FakeAuthManager.auth_data[k]
return None
+ def create_user(self, name, access=None, secret=None, admin=False):
+ u = User(name, name, access, secret, admin)
+ FakeAuthManager.auth_data[access] = u
+ return u
+
+ def modify_user(self, user_id, access=None, secret=None, admin=None):
+ user = None
+ for k, v in FakeAuthManager.auth_data.iteritems():
+ if v.id == user_id:
+ user = v
+ if user:
+ user.access = access
+ user.secret = secret
+ if admin is not None:
+ user.admin = admin
+
+ def is_admin(self, user):
+ return user.admin
+
+ def is_project_member(self, user, project):
+ return ((user.id in project.member_ids) or
+ (user.id == project.project_manager_id))
+
+ def create_project(self, name, manager_user, description=None,
+ member_users=None):
+ member_ids = [User.safe_id(m) for m in member_users] \
+ if member_users else []
+ p = Project(name, name, User.safe_id(manager_user),
+ description, member_ids)
+ FakeAuthManager.projects[name] = p
+ return p
+
+ def delete_project(self, pid):
+ if pid in FakeAuthManager.projects:
+ del FakeAuthManager.projects[pid]
+
+ def modify_project(self, project, manager_user=None, description=None):
+ p = FakeAuthManager.projects.get(project)
+ p.project_manager_id = User.safe_id(manager_user)
+ p.description = description
+
+ def get_project(self, pid):
+ p = FakeAuthManager.projects.get(pid)
+ if p:
+ return p
+ else:
+ raise exc.NotFound
+
+ def get_projects(self, user=None):
+ if not user:
+ return FakeAuthManager.projects.values()
+ else:
+ return [p for p in FakeAuthManager.projects.values()
+ if (user.id in p.member_ids) or
+ (user.id == p.project_manager_id)]
+
def get_user_from_access_key(self, key):
- return FakeAuthManager.auth_data.get(key, None)
+ try:
+ return FakeAuthManager.auth_data[key]
+ except KeyError:
+ raise exc.NotFound
class FakeRateLimiter(object):
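
The reworked fakes above (FakeAuthManager.reset_fake_data(), the class-level projects store, and the have_key_pair switch on stub_out_key_pair_funcs) are what the test changes further down rely on. For reference, a minimal test skeleton wired up against them might look like the sketch below; the class ExampleOpenstackApiTest is illustrative only and is not part of this change.

import stubout

from nova import test
from nova.tests.api.openstack import fakes


class ExampleOpenstackApiTest(test.TestCase):
    """Illustrative skeleton only; not part of this change."""

    def setUp(self):
        super(ExampleOpenstackApiTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        # Seed the canned user/project data instead of clearing it,
        # which is what the updated setUp() methods below switch to.
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)
        # Exercise the new "no key pair registered" code path.
        fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
        # Individual tests would then drive requests through
        # fakes.wsgi_app(), as ServersTest does further down.

    def tearDown(self):
        self.stubs.UnsetAll()
        super(ExampleOpenstackApiTest, self).tearDown()
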
diff --git a/nova/tests/api/openstack/test_accounts.py b/nova/tests/api/openstack/test_accounts.py
new file mode 100644
index 000000000..60edce769
--- /dev/null
+++ b/nova/tests/api/openstack/test_accounts.py
@@ -0,0 +1,125 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import json
+
+import stubout
+import webob
+
+import nova.api
+import nova.api.openstack.auth
+from nova import context
+from nova import flags
+from nova import test
+from nova.auth.manager import User
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+def fake_init(self):
+ self.manager = fakes.FakeAuthManager()
+
+
+def fake_admin_check(self, req):
+ return True
+
+
+class AccountsTest(test.TestCase):
+ def setUp(self):
+ super(AccountsTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(nova.api.openstack.accounts.Controller, '__init__',
+ fake_init)
+ self.stubs.Set(nova.api.openstack.accounts.Controller, '_check_admin',
+ fake_admin_check)
+ fakes.FakeAuthManager.clear_fakes()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+
+ self.allow_admin = FLAGS.allow_admin_api
+ FLAGS.allow_admin_api = True
+ fakemgr = fakes.FakeAuthManager()
+ joeuser = User('guy1', 'guy1', 'acc1', 'fortytwo!', False)
+ superuser = User('guy2', 'guy2', 'acc2', 'swordfish', True)
+ fakemgr.add_user(joeuser.access, joeuser)
+ fakemgr.add_user(superuser.access, superuser)
+ fakemgr.create_project('test1', joeuser)
+ fakemgr.create_project('test2', superuser)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(AccountsTest, self).tearDown()
+
+ def test_get_account(self):
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['account']['id'], 'test1')
+ self.assertEqual(res_dict['account']['name'], 'test1')
+ self.assertEqual(res_dict['account']['manager'], 'guy1')
+ self.assertEqual(res.status_int, 200)
+
+ def test_account_delete(self):
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertTrue('test1' not in fakes.FakeAuthManager.projects)
+ self.assertEqual(res.status_int, 200)
+
+ def test_account_create(self):
+ body = dict(account=dict(description='test account',
+ manager='guy1'))
+ req = webob.Request.blank('/v1.0/accounts/newacct')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['account']['id'], 'newacct')
+ self.assertEqual(res_dict['account']['name'], 'newacct')
+ self.assertEqual(res_dict['account']['description'], 'test account')
+ self.assertEqual(res_dict['account']['manager'], 'guy1')
+ self.assertTrue('newacct' in
+ fakes.FakeAuthManager.projects)
+ self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 3)
+
+ def test_account_update(self):
+ body = dict(account=dict(description='test account',
+ manager='guy2'))
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['account']['id'], 'test1')
+ self.assertEqual(res_dict['account']['name'], 'test1')
+ self.assertEqual(res_dict['account']['description'], 'test account')
+ self.assertEqual(res_dict['account']['manager'], 'guy2')
+ self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 2)
diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py
index dfce1b127..4568cb9f5 100644
--- a/nova/tests/api/openstack/test_adminapi.py
+++ b/nova/tests/api/openstack/test_adminapi.py
@@ -35,7 +35,7 @@ class AdminAPITest(test.TestCase):
def setUp(self):
super(AdminAPITest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index ff8d42a14..0448ed701 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -51,11 +51,12 @@ class Test(test.TestCase):
def test_authorize_user(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ f.add_user('user1_key',
+ nova.auth.manager.User(1, 'user1', None, None, None))
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
@@ -65,11 +66,13 @@ class Test(test.TestCase):
def test_authorize_token(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ u = nova.auth.manager.User(1, 'user1', None, None, None)
+ f.add_user('user1_key', u)
+ f.create_project('user1_project', u)
req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
@@ -90,7 +93,7 @@ class Test(test.TestCase):
def test_token_expiry(self):
self.destroy_called = False
- token_hash = 'bacon'
+ token_hash = 'token_hash'
def destroy_token_mock(meh, context, token):
self.destroy_called = True
@@ -107,15 +110,26 @@ class Test(test.TestCase):
bad_token)
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-Token'] = 'bacon'
+ req.headers['X-Auth-Token'] = 'token_hash'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
self.assertEqual(self.destroy_called, True)
- def test_bad_user(self):
+ def test_bad_user_bad_key(self):
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'unknown_user'
+ req.headers['X-Auth-Key'] = 'unknown_user_key'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+ def test_bad_user_good_key(self):
+ f = fakes.FakeAuthManager()
+ u = nova.auth.manager.User(1, 'user1', None, None, None)
+ f.add_user('user1_key', u)
+
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-User'] = 'unknown_user'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
@@ -126,7 +140,7 @@ class Test(test.TestCase):
def test_bad_token(self):
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-Token'] = 'baconbaconbacon'
+ req.headers['X-Auth-Token'] = 'unknown_token'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
@@ -135,11 +149,11 @@ class TestFunctional(test.TestCase):
def test_token_expiry(self):
ctx = context.get_admin_context()
tok = db.auth_token_create(ctx, dict(
- token_hash='bacon',
+ token_hash='test_token_hash',
cdn_management_url='',
server_management_url='',
storage_url='',
- user_id='ham',
+ user_id='user1',
))
db.auth_token_update(ctx, tok.token_hash, dict(
@@ -147,13 +161,13 @@ class TestFunctional(test.TestCase):
))
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-Token'] = 'bacon'
+ req.headers['X-Auth-Token'] = 'test_token_hash'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
def test_token_doesnotexist(self):
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-Token'] = 'ham'
+ req.headers['X-Auth-Token'] = 'nonexistant_token_hash'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
@@ -176,11 +190,13 @@ class TestLimiter(test.TestCase):
def test_authorize_token(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ u = nova.auth.manager.User(1, 'user1', None, None, None)
+ f.add_user('user1_key', u)
+ f.create_project('test', u)
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 92023362c..8f57c5b67 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -79,20 +79,14 @@ class LimiterTest(test.TestCase):
Test offset key works with a blank offset.
"""
req = Request.blank('/?offset=')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
def test_limiter_offset_bad(self):
"""
Test offset key works with a BAD offset.
"""
req = Request.blank(u'/?offset=\u0020aa')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
def test_limiter_nothing(self):
"""
@@ -166,18 +160,12 @@ class LimiterTest(test.TestCase):
"""
Test a negative limit.
"""
- def _limit_large():
- limited(self.large, req, max_limit=2000)
-
req = Request.blank('/?limit=-3000')
- self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
def test_limiter_negative_offset(self):
"""
Test a negative offset.
"""
- def _limit_large():
- limited(self.large, req, max_limit=2000)
-
req = Request.blank('/?offset=-30')
- self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 319767bb5..8280a505f 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -30,7 +30,7 @@ class FlavorsTest(test.TestCase):
def setUp(self):
super(FlavorsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index e232bc3d5..76f758929 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -22,6 +22,8 @@ and as a WSGI layer
import json
import datetime
+import shutil
+import tempfile
import stubout
import webob
@@ -54,7 +56,7 @@ class BaseImageServiceTests(object):
num_images = len(self.service.index(self.context))
- id = self.service.create(self.context, fixture)
+ id = self.service.create(self.context, fixture)['id']
self.assertNotEquals(None, id)
self.assertEquals(num_images + 1,
@@ -71,7 +73,7 @@ class BaseImageServiceTests(object):
num_images = len(self.service.index(self.context))
- id = self.service.create(self.context, fixture)
+ id = self.service.create(self.context, fixture)['id']
self.assertNotEquals(None, id)
@@ -89,7 +91,7 @@ class BaseImageServiceTests(object):
'instance_id': None,
'progress': None}
- id = self.service.create(self.context, fixture)
+ id = self.service.create(self.context, fixture)['id']
fixture['status'] = 'in progress'
@@ -118,7 +120,7 @@ class BaseImageServiceTests(object):
ids = []
for fixture in fixtures:
- new_id = self.service.create(self.context, fixture)
+ new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.index(self.context))
@@ -137,14 +139,15 @@ class LocalImageServiceTest(test.TestCase,
def setUp(self):
super(LocalImageServiceTest, self).setUp()
+ self.tempdir = tempfile.mkdtemp()
+ self.flags(images_path=self.tempdir)
self.stubs = stubout.StubOutForTesting()
service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
self.context = context.RequestContext(None, None)
def tearDown(self):
- self.service.delete_all()
- self.service.delete_imagedir()
+ shutil.rmtree(self.tempdir)
self.stubs.UnsetAll()
super(LocalImageServiceTest, self).tearDown()
@@ -202,7 +205,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.orig_image_service = FLAGS.image_service
FLAGS.image_service = 'nova.image.glance.GlanceImageService'
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index c9566c7e6..03e00af2a 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -15,8 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import base64
import datetime
import json
+import unittest
+from xml.dom import minidom
import stubout
import webob
@@ -120,7 +123,7 @@ class ServersTest(test.TestCase):
def setUp(self):
super(ServersTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@@ -188,9 +191,38 @@ class ServersTest(test.TestCase):
self.assertEqual(s.get('imageId', None), None)
i += 1
- def test_create_instance(self):
+ def test_get_servers_with_limit(self):
+ req = webob.Request.blank('/v1.0/servers?limit=3')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [0, 1, 2])
+
+ req = webob.Request.blank('/v1.0/servers?limit=aaa')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue('limit' in res.body)
+
+ def test_get_servers_with_offset(self):
+ req = webob.Request.blank('/v1.0/servers?offset=2')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [2, 3, 4])
+
+ req = webob.Request.blank('/v1.0/servers?offset=aaa')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue('offset' in res.body)
+
+ def test_get_servers_with_limit_and_offset(self):
+ req = webob.Request.blank('/v1.0/servers?limit=2&offset=1')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [1, 2])
+
+ def _test_create_instance_helper(self):
+ """Shared implementation for tests below that create instance"""
def instance_create(context, inst):
- return {'id': '1', 'display_name': ''}
+ return {'id': '1', 'display_name': 'server_test'}
def server_update(context, id, params):
return instance_create(context, id)
@@ -231,11 +263,25 @@ class ServersTest(test.TestCase):
req = webob.Request.blank('/v1.0/servers')
req.method = 'POST'
req.body = json.dumps(body)
+ req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
+ server = json.loads(res.body)['server']
+ self.assertEqual('serv', server['adminPass'][:4])
+ self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual('server_test', server['name'])
+ self.assertEqual('1', server['id'])
+
self.assertEqual(res.status_int, 200)
+ def test_create_instance(self):
+ self._test_create_instance_helper()
+
+ def test_create_instance_no_key_pair(self):
+ fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
+ self._test_create_instance_helper()
+
def test_update_no_body(self):
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
@@ -405,7 +451,8 @@ class ServersTest(test.TestCase):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
- req = webob.Request.blank('/v1.0/servers/1/inject_network_info')
+ req = webob.Request.blank(
+ '/v1.0/servers/1/inject_network_info')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
@@ -563,5 +610,529 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+
+class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
+
+ def setUp(self):
+ self.deserializer = servers.ServerCreateRequestXMLDeserializer()
+
+ def test_minimal_request(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata_and_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata/>
+ <personality/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata_and_personality_reversed(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality/>
+ <metadata/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_one_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality>
+ <file path="/etc/conf">aabbccdd</file>
+ </personality>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_two_personalities(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf">aabbccdd</file>
+<file path="/etc/sudoers">abcd</file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"},
+ {"path": "/etc/sudoers", "contents": "abcd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_second_personality_node_ignored(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality>
+ <file path="/etc/conf">aabbccdd</file>
+ </personality>
+ <personality>
+ <file path="/etc/ignoreme">anything</file>
+ </personality>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_missing_path(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file>aabbccdd</file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_empty_contents(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf"></file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": ""}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_empty_contents_variation(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf"/></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": ""}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha">beta</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "beta"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha">beta</meta>
+ <meta key="foo">bar</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "beta", "foo": "bar"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_missing_value(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha"></meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": ""}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata_missing_value(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha"/>
+ <meta key="delta"/>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "", "delta": ""}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_missing_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta>beta</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"": "beta"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata_missing_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta>beta</meta>
+ <meta>gamma</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"": "gamma"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_duplicate_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="foo">bar</meta>
+ <meta key="foo">baz</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"foo": "baz"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_canonical_request_from_docs(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">\
+ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp\
+dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k\
+IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs\
+c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g\
+QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo\
+ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv\
+dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy\
+c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6\
+b25zLiINCg0KLVJpY2hhcmQgQmFjaA==</file>
+ </personality>
+</server>"""
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {
+ "My Server Name": "Apache1",
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": """\
+ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp\
+dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k\
+IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs\
+c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g\
+QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo\
+ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv\
+dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy\
+c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6\
+b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
+ },
+ ],
+ }}
+ request = self.deserializer.deserialize(serial_request)
+ self.assertEqual(request, expected)
+
+
+class TestServerInstanceCreation(test.TestCase):
+
+ def setUp(self):
+ super(TestServerInstanceCreation, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.allow_admin = FLAGS.allow_admin_api
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(TestServerInstanceCreation, self).tearDown()
+
+ def _setup_mock_compute_api_for_personality(self):
+
+ class MockComputeAPI(object):
+
+ def __init__(self):
+ self.injected_files = None
+
+ def create(self, *args, **kwargs):
+ if 'injected_files' in kwargs:
+ self.injected_files = kwargs['injected_files']
+ else:
+ self.injected_files = None
+ return [{'id': '1234', 'display_name': 'fakeinstance'}]
+
+ def set_admin_password(self, *args, **kwargs):
+ pass
+
+ def make_stub_method(canned_return):
+ def stub_method(*args, **kwargs):
+ return canned_return
+ return stub_method
+
+ compute_api = MockComputeAPI()
+ self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api))
+ self.stubs.Set(nova.api.openstack.servers.Controller,
+ '_get_kernel_ramdisk_from_image', make_stub_method((1, 1)))
+ self.stubs.Set(nova.api.openstack.common,
+ 'get_image_id_from_image_hash', make_stub_method(2))
+ return compute_api
+
+ def _create_personality_request_dict(self, personality_files):
+ server = {}
+ server['name'] = 'new-server-test'
+ server['imageId'] = 1
+ server['flavorId'] = 1
+ if personality_files is not None:
+ personalities = []
+ for path, contents in personality_files:
+ personalities.append({'path': path, 'contents': contents})
+ server['personality'] = personalities
+ return {'server': server}
+
+ def _get_create_request_json(self, body_dict):
+ req = webob.Request.blank('/v1.0/servers')
+ req.content_type = 'application/json'
+ req.method = 'POST'
+ req.body = json.dumps(body_dict)
+ return req
+
+ def _run_create_instance_with_mock_compute_api(self, request):
+ compute_api = self._setup_mock_compute_api_for_personality()
+ response = request.get_response(fakes.wsgi_app())
+ return compute_api, response
+
+ def _format_xml_request_body(self, body_dict):
+ server = body_dict['server']
+ body_parts = []
+ body_parts.extend([
+ '<?xml version="1.0" encoding="UTF-8"?>',
+ '<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"',
+ ' name="%s" imageId="%s" flavorId="%s">' % (
+ server['name'], server['imageId'], server['flavorId'])])
+ if 'metadata' in server:
+ metadata = server['metadata']
+ body_parts.append('<metadata>')
+ for item in metadata.iteritems():
+ body_parts.append('<meta key="%s">%s</meta>' % item)
+ body_parts.append('</metadata>')
+ if 'personality' in server:
+ personalities = server['personality']
+ body_parts.append('<personality>')
+ for file in personalities:
+ item = (file['path'], file['contents'])
+ body_parts.append('<file path="%s">%s</file>' % item)
+ body_parts.append('</personality>')
+ body_parts.append('</server>')
+ return ''.join(body_parts)
+
+ def _get_create_request_xml(self, body_dict):
+ req = webob.Request.blank('/v1.0/servers')
+ req.content_type = 'application/xml'
+ req.accept = 'application/xml'
+ req.method = 'POST'
+ req.body = self._format_xml_request_body(body_dict)
+ return req
+
+ def _create_instance_with_personality_json(self, personality):
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ return request, response, compute_api.injected_files
+
+ def _create_instance_with_personality_xml(self, personality):
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_xml(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ return request, response, compute_api.injected_files
+
+ def test_create_instance_with_no_personality(self):
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality=None)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [])
+
+ def test_create_instance_with_no_personality_xml(self):
+ request, response, injected_files = \
+ self._create_instance_with_personality_xml(personality=None)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [])
+
+ def test_create_instance_with_personality(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Hello, World!"\n'
+ b64contents = base64.b64encode(contents)
+ personality = [(path, b64contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_with_personality_xml(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Hello, World!"\n'
+ b64contents = base64.b64encode(contents)
+ personality = [(path, b64contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_xml(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_with_personality_no_path(self):
+ personality = [('/remove/this/path',
+ base64.b64encode('my\n\file\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ del body_dict['server']['personality'][0]['path']
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def _test_create_instance_with_personality_no_path_xml(self):
+ personality = [('/remove/this/path',
+ base64.b64encode('my\n\file\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_xml(body_dict)
+ request.body = request.body.replace(' path="/remove/this/path"', '')
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_no_contents(self):
+ personality = [('/test/path',
+ base64.b64encode('remove\nthese\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ del body_dict['server']['personality'][0]['contents']
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_not_a_list(self):
+ personality = [('/test/path', base64.b64encode('test\ncontents\n'))]
+ body_dict = self._create_personality_request_dict(personality)
+ body_dict['server']['personality'] = \
+ body_dict['server']['personality'][0]
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_with_non_b64_content(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Oh no!"\n'
+ personality = [(path, contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(injected_files, None)
+
+ def test_create_instance_with_three_personalities(self):
+ files = [
+ ('/etc/sudoers', 'ALL ALL=NOPASSWD: ALL\n'),
+ ('/etc/motd', 'Enjoy your root access!\n'),
+ ('/etc/dovecot.conf', 'dovecot\nconfig\nstuff\n'),
+ ]
+ personality = []
+ for path, content in files:
+ personality.append((path, base64.b64encode(content)))
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, files)
+
+ def test_create_instance_personality_empty_content(self):
+ path = '/my/file/path'
+ contents = ''
+ personality = [(path, contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_admin_pass_json(self):
+ request, response, dummy = \
+ self._create_instance_with_personality_json(None)
+ self.assertEquals(response.status_int, 200)
+ response = json.loads(response.body)
+ self.assertTrue('adminPass' in response['server'])
+ self.assertTrue(response['server']['adminPass'].startswith('fake'))
+
+ def test_create_instance_admin_pass_xml(self):
+ request, response, dummy = \
+ self._create_instance_with_personality_xml(None)
+ self.assertEquals(response.status_int, 200)
+ dom = minidom.parseString(response.body)
+ server = dom.childNodes[0]
+ self.assertEquals(server.nodeName, 'server')
+ self.assertTrue(server.getAttribute('adminPass').startswith('fake'))
+
+
if __name__ == "__main__":
unittest.main()
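
Every personality test above hinges on one convention: file contents travel base64-encoded inside the request body, and the tests expect compute_api.create() to receive them decoded in injected_files. A quick sketch of that round trip, reusing the same values as the tests and introducing nothing new:

import base64

path = '/my/file/path'
contents = '#!/bin/bash\necho "Hello, World!"\n'

# What goes into the 'personality' entry of the request body.
b64contents = base64.b64encode(contents)

# What the tests expect back in injected_files after the API decodes it.
assert base64.b64decode(b64contents) == contents
assert [(path, base64.b64decode(b64contents))] == [(path, contents)]
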
diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py
new file mode 100644
index 000000000..2dda4319b
--- /dev/null
+++ b/nova/tests/api/openstack/test_users.py
@@ -0,0 +1,141 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import stubout
+import webob
+
+import nova.api
+import nova.api.openstack.auth
+from nova import context
+from nova import flags
+from nova import test
+from nova.auth.manager import User, Project
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+def fake_init(self):
+ self.manager = fakes.FakeAuthManager()
+
+
+def fake_admin_check(self, req):
+ return True
+
+
+class UsersTest(test.TestCase):
+ def setUp(self):
+ super(UsersTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(nova.api.openstack.users.Controller, '__init__',
+ fake_init)
+ self.stubs.Set(nova.api.openstack.users.Controller, '_check_admin',
+ fake_admin_check)
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.projects = dict(testacct=Project('testacct',
+ 'testacct',
+ 'guy1',
+ 'test',
+ []))
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+
+ self.allow_admin = FLAGS.allow_admin_api
+ FLAGS.allow_admin_api = True
+ fakemgr = fakes.FakeAuthManager()
+ fakemgr.add_user('acc1', User('guy1', 'guy1', 'acc1',
+ 'fortytwo!', False))
+ fakemgr.add_user('acc2', User('guy2', 'guy2', 'acc2',
+ 'swordfish', True))
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(UsersTest, self).tearDown()
+
+ def test_get_user_list(self):
+ req = webob.Request.blank('/v1.0/users')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(len(res_dict['users']), 2)
+
+ def test_get_user_by_id(self):
+ req = webob.Request.blank('/v1.0/users/guy2')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['user']['id'], 'guy2')
+ self.assertEqual(res_dict['user']['name'], 'guy2')
+ self.assertEqual(res_dict['user']['secret'], 'swordfish')
+ self.assertEqual(res_dict['user']['admin'], True)
+ self.assertEqual(res.status_int, 200)
+
+ def test_user_delete(self):
+ req = webob.Request.blank('/v1.0/users/guy1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertTrue('guy1' not in [u.id for u in
+ fakes.FakeAuthManager.auth_data.values()])
+ self.assertEqual(res.status_int, 200)
+
+ def test_user_create(self):
+ body = dict(user=dict(name='test_guy',
+ access='acc3',
+ secret='invasionIsInNormandy',
+ admin=True))
+ req = webob.Request.blank('/v1.0/users')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'POST'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['user']['id'], 'test_guy')
+ self.assertEqual(res_dict['user']['name'], 'test_guy')
+ self.assertEqual(res_dict['user']['access'], 'acc3')
+ self.assertEqual(res_dict['user']['secret'], 'invasionIsInNormandy')
+ self.assertEqual(res_dict['user']['admin'], True)
+ self.assertTrue('test_guy' in [u.id for u in
+ fakes.FakeAuthManager.auth_data.values()])
+ self.assertEqual(len(fakes.FakeAuthManager.auth_data.values()), 3)
+
+ def test_user_update(self):
+ body = dict(user=dict(name='guy2',
+ access='acc2',
+ secret='invasionIsInNormandy'))
+ req = webob.Request.blank('/v1.0/users/guy2')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['user']['id'], 'guy2')
+ self.assertEqual(res_dict['user']['name'], 'guy2')
+ self.assertEqual(res_dict['user']['access'], 'acc2')
+ self.assertEqual(res_dict['user']['secret'], 'invasionIsInNormandy')
+ self.assertEqual(res_dict['user']['admin'], True)
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index a40d46749..12d39fd29 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -58,7 +58,7 @@ def zone_get_all_scheduler(*args):
dict(id=1, api_url='http://example.com', username='bob',
password='xxx'),
dict(id=2, api_url='http://example.org', username='alice',
- password='qwerty')
+ password='qwerty'),
]
@@ -71,7 +71,7 @@ def zone_get_all_db(context):
dict(id=1, api_url='http://example.com', username='bob',
password='xxx'),
dict(id=2, api_url='http://example.org', username='alice',
- password='qwerty')
+ password='qwerty'),
]
@@ -83,7 +83,7 @@ class ZonesTest(test.TestCase):
def setUp(self):
super(ZonesTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@@ -120,24 +120,27 @@ class ZonesTest(test.TestCase):
self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler_empty)
self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)
req = webob.Request.blank('/v1.0/zones')
+ req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
self.assertEqual(len(res_dict['zones']), 2)
def test_get_zone_by_id(self):
req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('password' in res_dict['zone'])
- self.assertEqual(res.status_int, 200)
def test_zone_delete(self):
req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
@@ -146,13 +149,14 @@ class ZonesTest(test.TestCase):
body = dict(zone=dict(api_url='http://example.com', username='fred',
password='fubar'))
req = webob.Request.blank('/v1.0/zones')
+ req.headers["Content-Type"] = "application/json"
req.method = 'POST'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('username' in res_dict['zone'])
@@ -160,20 +164,21 @@ class ZonesTest(test.TestCase):
def test_zone_update(self):
body = dict(zone=dict(username='zeb', password='sneaky'))
req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
req.method = 'PUT'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('username' in res_dict['zone'])
def test_zone_info(self):
FLAGS.zone_name = 'darksecret'
- FLAGS.zone_capabilities = 'cap1:a,b;cap2:c,d'
+ FLAGS.zone_capabilities = ['cap1=a;b', 'cap2=c;d']
self.stubs.Set(api, '_call_scheduler', zone_caps)
body = dict(zone=dict(username='zeb', password='sneaky'))
@@ -183,5 +188,5 @@ class ZonesTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['zone']['name'], 'darksecret')
- self.assertEqual(res_dict['zone']['cap1'], 'a,b')
- self.assertEqual(res_dict['zone']['cap2'], 'c,d')
+ self.assertEqual(res_dict['zone']['cap1'], 'a;b')
+ self.assertEqual(res_dict['zone']['cap2'], 'c;d')
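
The zone_capabilities flag changes shape here, from a single 'cap1:a,b;cap2:c,d' string to a list of key=value entries. This diff does not show the parsing side in nova itself, but the assertions in test_zone_info imply a mapping roughly like the sketch below (the parsing code is an assumption inferred from those assertions, not taken from this change):

# Assumed parsing, inferred from the assertions in test_zone_info above.
zone_capabilities = ['cap1=a;b', 'cap2=c;d']

caps = dict(cap.split('=', 1) for cap in zone_capabilities)
assert caps == {'cap1': 'a;b', 'cap2': 'c;d'}
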
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 2c7852214..b1a849cf9 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -21,11 +21,13 @@
Test WSGI basics and provide some helper functions for other WSGI tests.
"""
+import json
from nova import test
import routes
import webob
+from nova import exception
from nova import wsgi
@@ -66,63 +68,164 @@ class Test(test.TestCase):
result = webob.Request.blank('/bad').get_response(Router())
self.assertNotEqual(result.body, "Router result")
- def test_controller(self):
- class Controller(wsgi.Controller):
- """Test controller to call from router."""
- test = self
+class ControllerTest(test.TestCase):
- def show(self, req, id): # pylint: disable-msg=W0622,C0103
- """Default action called for requests with an ID."""
- self.test.assertEqual(req.path_info, '/tests/123')
- self.test.assertEqual(id, '123')
- return id
+ class TestRouter(wsgi.Router):
- class Router(wsgi.Router):
- """Test router."""
+ class TestController(wsgi.Controller):
- def __init__(self):
- mapper = routes.Mapper()
- mapper.resource("test", "tests", controller=Controller())
- super(Router, self).__init__(mapper)
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "test": ["id"]}}}
- result = webob.Request.blank('/tests/123').get_response(Router())
- self.assertEqual(result.body, "123")
- result = webob.Request.blank('/test/123').get_response(Router())
- self.assertNotEqual(result.body, "123")
+ def show(self, req, id): # pylint: disable-msg=W0622,C0103
+ return {"test": {"id": id}}
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.resource("test", "tests", controller=self.TestController())
+ wsgi.Router.__init__(self, mapper)
+
+ def test_show(self):
+ request = wsgi.Request.blank('/tests/123')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(json.loads(result.body), {"test": {"id": "123"}})
+
+ def test_response_content_type_from_accept_xml(self):
+ request = webob.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/xml")
+
+ def test_response_content_type_from_accept_json(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/json"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+ def test_response_content_type_from_query_extension_xml(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/xml")
+
+ def test_response_content_type_from_query_extension_json(self):
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+ def test_response_content_type_default_when_unsupported(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.status_int, 200)
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+
+class RequestTest(test.TestCase):
+
+ def test_request_content_type_missing(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.body = "<body />"
+ self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+
+ def test_request_content_type_unsupported(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "text/html"
+ request.body = "asdf<br />"
+ self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+
+ def test_content_type_from_accept_xml(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml, application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = \
+ "application/json; q=0.3, application/xml; q=0.9"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_from_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123.invalid')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_accept_and_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_accept_default(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
class SerializerTest(test.TestCase):
- def match(self, url, accept, expect):
+ def test_xml(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_xml = '<servers><a>(2,3)</a></servers>'
+ serializer = wsgi.Serializer()
+ result = serializer.serialize(input_dict, "application/xml")
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_xml)
+
+ def test_json(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
- req = webob.Request.blank(url, headers=dict(Accept=accept))
- result = wsgi.Serializer(req.environ).to_content_type(input_dict)
+ serializer = wsgi.Serializer()
+ result = serializer.serialize(input_dict, "application/json")
result = result.replace('\n', '').replace(' ', '')
- if expect == 'xml':
- self.assertEqual(result, expected_xml)
- elif expect == 'json':
- self.assertEqual(result, expected_json)
- else:
- raise "Bad expect value"
-
- def test_basic(self):
- self.match('/servers/4.json', None, expect='json')
- self.match('/servers/4', 'application/json', expect='json')
- self.match('/servers/4', 'application/xml', expect='xml')
- self.match('/servers/4.xml', None, expect='xml')
-
- def test_defaults_to_json(self):
- self.match('/servers/4', None, expect='json')
- self.match('/servers/4', 'text/html', expect='json')
-
- def test_suffix_takes_precedence_over_accept_header(self):
- self.match('/servers/4.xml', 'application/json', expect='xml')
- self.match('/servers/4.xml.', 'application/json', expect='json')
-
- def test_deserialize(self):
+ self.assertEqual(result, expected_json)
+
+ def test_unsupported_content_type(self):
+ serializer = wsgi.Serializer()
+ self.assertRaises(exception.InvalidContentType, serializer.serialize,
+ {}, "text/null")
+
+ def test_deserialize_json(self):
+ data = """{"a": {
+ "a1": "1",
+ "a2": "2",
+ "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+ "d": {"e": "1"},
+ "f": "1"}}"""
+ as_dict = dict(a={
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
+ 'd': {'e': '1'},
+ 'f': '1'})
+ metadata = {}
+ serializer = wsgi.Serializer(metadata)
+ self.assertEqual(serializer.deserialize(data, "application/json"),
+ as_dict)
+
+ def test_deserialize_xml(self):
xml = """
<a a1="1" a2="2">
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
@@ -137,11 +240,13 @@ class SerializerTest(test.TestCase):
'd': {'e': '1'},
'f': '1'})
metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})}
- serializer = wsgi.Serializer({}, metadata)
- self.assertEqual(serializer.deserialize(xml), as_dict)
+ serializer = wsgi.Serializer(metadata)
+ self.assertEqual(serializer.deserialize(xml, "application/xml"),
+ as_dict)
def test_deserialize_empty_xml(self):
xml = """<a></a>"""
as_dict = {"a": {}}
- serializer = wsgi.Serializer({})
- self.assertEqual(serializer.deserialize(xml), as_dict)
+ serializer = wsgi.Serializer()
+ self.assertEqual(serializer.deserialize(xml, "application/xml"),
+ as_dict)
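
A rough usage sketch of the reworked serializer and content negotiation
interfaces exercised by the tests above; the calls mirror the test
expectations, while the import paths and the metadata shape are assumed
rather than taken from this diff:

    from nova import exception
    from nova import wsgi

    # Serialization is now driven by an explicit content type instead of the
    # request environ.
    serializer = wsgi.Serializer({'application/xml': {'plurals': {'bs': 'b'}}})
    body = serializer.serialize({'servers': {'a': [2, 3]}}, 'application/json')
    data = serializer.deserialize(body, 'application/json')

    # Content negotiation lives on the Request subclass: a URL extension wins
    # over the Accept header, and unsupported types fall back to JSON.
    req = wsgi.Request.blank('/tests/123.xml')
    req.headers['Accept'] = 'application/json'
    assert req.best_match_content_type() == 'application/xml'

    # Unknown content types raise InvalidContentType instead of guessing.
    try:
        serializer.serialize({}, 'text/null')
    except exception.InvalidContentType:
        pass
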
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index d760dc456..5e9a3aa3b 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -77,7 +77,8 @@ def stub_out_db_instance_api(stubs):
'mac_address': values['mac_address'],
'vcpus': type_data['vcpus'],
'local_gb': type_data['local_gb'],
- }
+ 'os_type': values['os_type']}
+
return FakeModel(base_options)
def fake_network_get_by_instance(context, instance_id):
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index cbd949477..5d7ca98b5 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -32,6 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS.network_size = 8
FLAGS.num_networks = 2
FLAGS.fake_network = True
+FLAGS.image_service = 'nova.image.local.LocalImageService'
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
diff --git a/nova/tests/integrated/__init__.py b/nova/tests/integrated/__init__.py
new file mode 100644
index 000000000..10e0a91d7
--- /dev/null
+++ b/nova/tests/integrated/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`integrated` -- Tests whole systems, using mock services where needed
+=================================
+"""
diff --git a/nova/tests/integrated/api/__init__.py b/nova/tests/integrated/api/__init__.py
new file mode 100644
index 000000000..5798ab3d1
--- /dev/null
+++ b/nova/tests/integrated/api/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`api` -- OpenStack API client, for testing rather than production
+=================================
+"""
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
new file mode 100644
index 000000000..245eb8c69
--- /dev/null
+++ b/nova/tests/integrated/api/client.py
@@ -0,0 +1,212 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import httplib
+import urlparse
+
+from nova import log as logging
+
+
+LOG = logging.getLogger('nova.tests.api')
+
+
+class OpenStackApiException(Exception):
+ def __init__(self, message=None, response=None):
+ self.response = response
+ if not message:
+ message = 'Unspecified error'
+
+ if response:
+ _status = response.status
+ _body = response.read()
+
+ message = _('%(message)s\nStatus Code: %(_status)s\n'
+ 'Body: %(_body)s') % locals()
+
+ super(OpenStackApiException, self).__init__(message)
+
+
+class OpenStackApiAuthenticationException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Authentication error")
+ super(OpenStackApiAuthenticationException, self).__init__(message,
+ response)
+
+
+class OpenStackApiNotFoundException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Item not found")
+ super(OpenStackApiNotFoundException, self).__init__(message, response)
+
+
+class TestOpenStackClient(object):
+ """ A really basic OpenStack API client that is under our control,
+ so we can make changes / insert hooks for testing"""
+
+ def __init__(self, auth_user, auth_key, auth_uri):
+ super(TestOpenStackClient, self).__init__()
+ self.auth_result = None
+ self.auth_user = auth_user
+ self.auth_key = auth_key
+ self.auth_uri = auth_uri
+
+ def request(self, url, method='GET', body=None, headers=None):
+ if headers is None:
+ headers = {}
+
+ parsed_url = urlparse.urlparse(url)
+ port = parsed_url.port
+ hostname = parsed_url.hostname
+ scheme = parsed_url.scheme
+
+ if scheme == 'http':
+ conn = httplib.HTTPConnection(hostname,
+ port=port)
+ elif scheme == 'https':
+ conn = httplib.HTTPSConnection(hostname,
+ port=port)
+ else:
+ raise OpenStackApiException("Unknown scheme: %s" % url)
+
+ relative_url = parsed_url.path
+ if parsed_url.query:
+            relative_url = relative_url + "?" + parsed_url.query
+ LOG.info(_("Doing %(method)s on %(relative_url)s") % locals())
+ if body:
+ LOG.info(_("Body: %s") % body)
+
+ conn.request(method, relative_url, body, headers)
+ response = conn.getresponse()
+ return response
+
+ def _authenticate(self):
+ if self.auth_result:
+ return self.auth_result
+
+ auth_uri = self.auth_uri
+ headers = {'X-Auth-User': self.auth_user,
+ 'X-Auth-Key': self.auth_key}
+ response = self.request(auth_uri,
+ headers=headers)
+
+ http_status = response.status
+ LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals())
+
+ # Until bug732866 is fixed, we can't check this properly...
+ #if http_status == 401:
+ if http_status != 204:
+ raise OpenStackApiAuthenticationException(response=response)
+
+ auth_headers = {}
+ for k, v in response.getheaders():
+ auth_headers[k] = v
+
+ self.auth_result = auth_headers
+ return self.auth_result
+
+ def api_request(self, relative_uri, check_response_status=None, **kwargs):
+ auth_result = self._authenticate()
+
+ #NOTE(justinsb): httplib 'helpfully' converts headers to lower case
+ base_uri = auth_result['x-server-management-url']
+ full_uri = base_uri + relative_uri
+
+ headers = kwargs.setdefault('headers', {})
+ headers['X-Auth-Token'] = auth_result['x-auth-token']
+
+ response = self.request(full_uri, **kwargs)
+
+ http_status = response.status
+ LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals())
+
+ if check_response_status:
+ if not http_status in check_response_status:
+ if http_status == 404:
+ raise OpenStackApiNotFoundException(response=response)
+ else:
+ raise OpenStackApiException(
+ message=_("Unexpected status code"),
+ response=response)
+
+ return response
+
+ def _decode_json(self, response):
+ body = response.read()
+ LOG.debug(_("Decoding JSON: %s") % (body))
+ return json.loads(body)
+
+ def api_get(self, relative_uri, **kwargs):
+ kwargs.setdefault('check_response_status', [200])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_post(self, relative_uri, body, **kwargs):
+ kwargs['method'] = 'POST'
+ if body:
+ headers = kwargs.setdefault('headers', {})
+ headers['Content-Type'] = 'application/json'
+ kwargs['body'] = json.dumps(body)
+
+ kwargs.setdefault('check_response_status', [200])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_delete(self, relative_uri, **kwargs):
+ kwargs['method'] = 'DELETE'
+ kwargs.setdefault('check_response_status', [200, 202])
+ return self.api_request(relative_uri, **kwargs)
+
+ def get_server(self, server_id):
+ return self.api_get('/servers/%s' % server_id)['server']
+
+ def get_servers(self, detail=True):
+ rel_url = '/servers/detail' if detail else '/servers'
+ return self.api_get(rel_url)['servers']
+
+ def post_server(self, server):
+ return self.api_post('/servers', server)['server']
+
+ def delete_server(self, server_id):
+ return self.api_delete('/servers/%s' % server_id)
+
+ def get_image(self, image_id):
+ return self.api_get('/images/%s' % image_id)['image']
+
+ def get_images(self, detail=True):
+ rel_url = '/images/detail' if detail else '/images'
+ return self.api_get(rel_url)['images']
+
+ def post_image(self, image):
+ return self.api_post('/images', image)['image']
+
+ def delete_image(self, image_id):
+ return self.api_delete('/images/%s' % image_id)
+
+ def get_flavor(self, flavor_id):
+ return self.api_get('/flavors/%s' % flavor_id)['flavor']
+
+ def get_flavors(self, detail=True):
+ rel_url = '/flavors/detail' if detail else '/flavors'
+ return self.api_get(rel_url)['flavors']
+
+ def post_flavor(self, flavor):
+ return self.api_post('/flavors', flavor)['flavor']
+
+ def delete_flavor(self, flavor_id):
+ return self.api_delete('/flavors/%s' % flavor_id)
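
A minimal sketch of how TestOpenStackClient is meant to be driven from the
integrated tests; the credentials, endpoint and server body below are
placeholders, only the method names come from the class added above:

    from nova.tests.integrated.api import client

    # Placeholder credentials and endpoint; the integrated test harness would
    # supply real values pointing at an in-process API service.
    api = client.TestOpenStackClient('fake_user', 'fake_key',
                                     'http://localhost:8774/v1.0/')

    servers = api.get_servers(detail=False)
    created = api.post_server({'server': {'name': 'test-server',
                                          'imageId': 1,
                                          'flavorId': 1}})
    api.delete_server(created['id'])
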
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index b195fa520..cf8ee7eff 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -38,6 +38,8 @@ from nova import test
from nova.auth import manager
from nova.compute import power_state
from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.image import local
from nova.objectstore import image
@@ -76,6 +78,12 @@ class CloudTestCase(test.TestCase):
project=self.project)
host = self.network.get_network_host(self.context.elevated())
+ def fake_show(meh, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(local.LocalImageService, 'show', fake_show)
+ self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
+
def tearDown(self):
network_ref = db.project_get_network(self.context,
self.project.id)
@@ -122,7 +130,7 @@ class CloudTestCase(test.TestCase):
self.cloud.allocate_address(self.context)
inst = db.instance_create(self.context, {'host': self.compute.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
- ec2_id = cloud.id_to_ec2_id(inst['id'])
+ ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
@@ -158,12 +166,12 @@ class CloudTestCase(test.TestCase):
vol2 = db.volume_create(self.context, {})
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
- volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x')
+ volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
result = self.cloud.describe_volumes(self.context,
volume_id=[volume_id])
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(
- cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
+ ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
vol2['id'])
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
@@ -188,8 +196,10 @@ class CloudTestCase(test.TestCase):
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_id': 1,
'host': 'host1'})
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_id': 1,
'host': 'host2'})
comp1 = db.service_create(self.context, {'host': 'host1',
'availability_zone': 'zone1',
@@ -200,7 +210,7 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
- instance_id = cloud.id_to_ec2_id(inst2['id'])
+ instance_id = ec2utils.id_to_ec2_id(inst2['id'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
@@ -215,10 +225,9 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])
def test_console_output(self):
- image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
max_count = 1
- kwargs = {'image_id': image_id,
+ kwargs = {'image_id': 'ami-1',
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)
@@ -234,8 +243,7 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)
def test_ajax_console(self):
- image_id = FLAGS.default_image
- kwargs = {'image_id': image_id}
+ kwargs = {'image_id': 'ami-1'}
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
greenthread.sleep(0.3)
@@ -347,7 +355,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
- ec2_id = cloud.id_to_ec2_id(inst['id'])
+ ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get(self.context, inst['id'])
@@ -365,7 +373,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_display_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
- cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+ ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
display_name='c00l v0lum3')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual('c00l v0lum3', vol['display_name'])
@@ -374,7 +382,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_wont_update_private_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
- cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+ ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
mountpoint='/not/here')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
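
The test changes above swap cloud.id_to_ec2_id()/ec2_id_to_id() for the
ec2utils equivalents. Their expected behaviour, shown with illustrative
stand-ins (the real module may differ in details):

    def id_to_ec2_id(internal_id, template='i-%08x'):
        # 10 -> 'i-0000000a'; volumes pass template='vol-%08x'
        return template % internal_id

    def ec2_id_to_id(ec2_id):
        # 'i-0000000a' -> 10; the hex suffix is the internal database id
        return int(ec2_id.split('-')[-1], 16)

    assert id_to_ec2_id(10) == 'i-0000000a'
    assert ec2_id_to_id('vol-0000000a') == 10
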
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 58493d7ac..3651f4cef 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,6 +20,7 @@ Tests For Compute
"""
import datetime
+import mox
from nova import compute
from nova import context
@@ -27,15 +28,20 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
-
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
+from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
class ComputeTestCase(test.TestCase):
@@ -52,6 +58,11 @@ class ComputeTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
+ def fake_show(meh, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(local.LocalImageService, 'show', fake_show)
+
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
@@ -60,7 +71,7 @@ class ComputeTestCase(test.TestCase):
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
@@ -78,6 +89,21 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project.id}
return db.security_group_create(self.context, values)
+ def _get_dummy_instance(self):
+ """Get mock-return-value instance object
+ Use this when any testcase executed later than test_run_terminate
+ """
+ vol1 = models.Volume()
+ vol1['id'] = 1
+ vol2 = models.Volume()
+ vol2['id'] = 2
+ instance_ref = models.Instance()
+ instance_ref['id'] = 1
+ instance_ref['volumes'] = [vol1, vol2]
+ instance_ref['hostname'] = 'i-00000001'
+ instance_ref['host'] = 'dummy'
+ return instance_ref
+
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -296,3 +322,256 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
type = instance_types.get_by_flavor_id("1")
self.assertEqual(type, 'm1.tiny')
+
+ def _setup_other_managers(self):
+ self.volume_manager = utils.import_object(FLAGS.volume_manager)
+ self.network_manager = utils.import_object(FLAGS.network_manager)
+ self.compute_driver = utils.import_object(FLAGS.compute_driver)
+
+ def test_pre_live_migration_instance_has_no_fixed_ip(self):
+ """Confirm raising exception if instance doesn't have fixed_ip."""
+ instance_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NotFound,
+ self.compute.pre_live_migration,
+ c, instance_ref['id'])
+
+ def test_pre_live_migration_instance_has_volume(self):
+ """Confirm setup_compute_volume is called when volume is mounted."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ volmock = self.mox.CreateMock(self.volume_manager)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ vid = i_ref['volumes'][i]['id']
+ volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
+ self.compute.db = dbmock
+ self.compute.volume_manager = volmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_ref['id'])
+ self.assertEqual(ret, None)
+
+ def test_pre_live_migration_instance_has_no_volume(self):
+ """Confirm log meg when instance doesn't mount any volumes."""
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ self.mox.StubOutWithMock(compute_manager.LOG, 'info')
+ compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_ref['id'])
+ self.assertEqual(ret, None)
+
+ def test_pre_live_migration_setup_compute_node_fail(self):
+ """Confirm operation setup_compute_network() fails.
+
+ It retries and raise exception when timeout exceeded.
+
+ """
+
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ volmock = self.mox.CreateMock(self.volume_manager)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
+ for i in range(FLAGS.live_migration_retry_count):
+ netmock.setup_compute_network(c, i_ref['id']).\
+ AndRaise(exception.ProcessExecutionError())
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.volume_manager = volmock
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.compute.pre_live_migration,
+ c, i_ref['id'])
+
+ def test_live_migration_works_correctly_with_volume(self):
+ """Confirm check_for_export to confirm volume health check."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
+
+ def test_live_migration_dest_raises_exception(self):
+ """Confirm exception when pre_live_migration fails."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+ for v in i_ref['volumes']:
+ dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+
+ def test_live_migration_dest_raises_exception_no_volume(self):
+ """Same as above test(input pattern is different) """
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+
+ def test_live_migration_works_correctly_no_volume(self):
+ """Confirm live_migration() works as expected correctly."""
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
+
+ def test_post_live_migration_working_correctly(self):
+ """Confirm post_live_migration() works as expected correctly."""
+ dest = 'desthost'
+ flo_addr = '1.2.1.2'
+
+        # Preparing data
+ c = context.get_admin_context()
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(c, instance_id)
+ db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
+ 'state': power_state.PAUSED})
+ v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
+ fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
+ 'instance_id': instance_id})
+ fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
+ flo_ref = db.floating_ip_create(c, {'address': flo_addr,
+ 'fixed_ip_id': fix_ref['id']})
+ # reload is necessary before setting mocks
+ i_ref = db.instance_get(c, instance_id)
+
+ # Preparing mocks
+ self.mox.StubOutWithMock(self.compute.volume_manager,
+ 'remove_compute_volume')
+ for v in i_ref['volumes']:
+ self.compute.volume_manager.remove_compute_volume(c, v['id'])
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(i_ref)
+
+ # executing
+ self.mox.ReplayAll()
+ ret = self.compute.post_live_migration(c, i_ref, dest)
+
+        # make sure all data has been rewritten to dest
+ i_ref = db.instance_get(c, i_ref['id'])
+ c1 = (i_ref['host'] == dest)
+ flo_refs = db.floating_ip_get_all_by_host(c, dest)
+ c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)
+
+        # post operation
+ self.assertTrue(c1 and c2)
+ db.instance_destroy(c, instance_id)
+ db.volume_destroy(c, v_ref['id'])
+ db.floating_ip_destroy(c, flo_addr)
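
The live-migration tests added above rely on mox's record/replay/verify
cycle. A self-contained sketch of that pattern, using a stand-in class
instead of nova's db module:

    import mox

    class FakeDB(object):
        def instance_get(self, context, instance_id):
            raise NotImplementedError()

    m = mox.Mox()
    dbmock = m.CreateMock(FakeDB)

    # Record phase: declare the calls the code under test must make.
    dbmock.instance_get('ctxt', 1).AndReturn({'id': 1})

    # Replay phase: the mock now enforces the recorded expectations.
    m.ReplayAll()
    assert dbmock.instance_get('ctxt', 1) == {'id': 1}

    # Verify phase: fails if a recorded call was never made.
    m.VerifyAll()
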
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
index 49ff24413..d47c70d88 100644
--- a/nova/tests/test_console.py
+++ b/nova/tests/test_console.py
@@ -57,7 +57,7 @@ class ConsoleTestCase(test.TestCase):
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py
index b6bfab534..80e4d2e1f 100644
--- a/nova/tests/test_direct.py
+++ b/nova/tests/test_direct.py
@@ -59,6 +59,7 @@ class DirectTestCase(test.TestCase):
req.headers['X-OpenStack-User'] = 'user1'
req.headers['X-OpenStack-Project'] = 'proj1'
resp = req.get_response(self.auth_router)
+ self.assertEqual(resp.status_int, 200)
data = json.loads(resp.body)
self.assertEqual(data['user'], 'user1')
self.assertEqual(data['project'], 'proj1')
@@ -69,6 +70,7 @@ class DirectTestCase(test.TestCase):
req.method = 'POST'
req.body = 'json=%s' % json.dumps({'data': 'foo'})
resp = req.get_response(self.router)
+ self.assertEqual(resp.status_int, 200)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
@@ -78,6 +80,7 @@ class DirectTestCase(test.TestCase):
req.method = 'POST'
req.body = 'data=foo'
resp = req.get_response(self.router)
+ self.assertEqual(resp.status_int, 200)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
@@ -90,8 +93,7 @@ class DirectTestCase(test.TestCase):
class DirectCloudTestCase(test_cloud.CloudTestCase):
def setUp(self):
super(DirectCloudTestCase, self).setUp()
- compute_handle = compute.API(image_service=self.cloud.image_service,
- network_api=self.cloud.network_api,
+ compute_handle = compute.API(network_api=self.cloud.network_api,
volume_api=self.cloud.volume_api)
direct.register_service('compute', compute_handle)
self.router = direct.JsonParamsMiddleware(direct.Router())
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index e6da6112a..1fbaf304f 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -14,26 +14,29 @@
# License for the specific language governing permissions and limitations
# under the License.
+import errno
import os
+import select
from nova import test
-from nova.utils import parse_mailmap, str_dict_replace
+from nova.utils import parse_mailmap, str_dict_replace, synchronized
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
- if os.path.exists('.bzr'):
+ topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
+ if os.path.exists(os.path.join(topdir, '.bzr')):
contributors = set()
- mailmap = parse_mailmap('.mailmap')
+ mailmap = parse_mailmap(os.path.join(topdir, '.mailmap'))
import bzrlib.workingtree
- tree = bzrlib.workingtree.WorkingTree.open('.')
+ tree = bzrlib.workingtree.WorkingTree.open(topdir)
tree.lock_read()
try:
parents = tree.get_parent_ids()
g = tree.branch.repository.get_graph()
- for p in parents[1:]:
+ for p in parents:
rev_ids = [r for r, _ in g.iter_ancestry(parents)
if r != "null:"]
revs = tree.branch.repository.get_revisions(rev_ids)
@@ -42,7 +45,8 @@ class ProjectTestCase(test.TestCase):
email = author.split(' ')[-1]
contributors.add(str_dict_replace(email, mailmap))
- authors_file = open('Authors', 'r').read()
+ authors_file = open(os.path.join(topdir, 'Authors'),
+ 'r').read()
missing = set()
for contributor in contributors:
@@ -55,3 +59,47 @@ class ProjectTestCase(test.TestCase):
'%r not listed in Authors' % missing)
finally:
tree.unlock()
+
+
+class LockTestCase(test.TestCase):
+ def test_synchronized_wrapped_function_metadata(self):
+ @synchronized('whatever')
+ def foo():
+ """Bar"""
+ pass
+ self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring "
+ "got lost")
+ self.assertEquals(foo.__name__, 'foo', "Wrapped function's name "
+ "got mangled")
+
+ def test_synchronized(self):
+ rpipe1, wpipe1 = os.pipe()
+ rpipe2, wpipe2 = os.pipe()
+
+ @synchronized('testlock')
+ def f(rpipe, wpipe):
+ try:
+ os.write(wpipe, "foo")
+ except OSError, e:
+ self.assertEquals(e.errno, errno.EPIPE)
+ return
+
+ rfds, _, __ = select.select([rpipe], [], [], 1)
+ self.assertEquals(len(rfds), 0, "The other process, which was"
+ " supposed to be locked, "
+ "wrote on its end of the "
+ "pipe")
+ os.close(rpipe)
+
+ pid = os.fork()
+ if pid > 0:
+ os.close(wpipe1)
+ os.close(rpipe2)
+
+ f(rpipe1, wpipe2)
+ else:
+ os.close(rpipe1)
+ os.close(wpipe2)
+
+ f(rpipe2, wpipe1)
+ os._exit(0)
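
For reference, a simplified, in-process-only sketch of the kind of decorator
LockTestCase exercises; nova's real synchronized helper also has to hold the
lock across processes, which is what the fork/pipe test above checks:

    import functools
    import threading

    _locks = {}

    def synchronized(name):
        """Illustrative stand-in, not the real nova.utils.synchronized."""
        def wrap(f):
            lock = _locks.setdefault(name, threading.Lock())

            @functools.wraps(f)      # preserves __name__ and __doc__
            def inner(*args, **kwargs):
                with lock:
                    return f(*args, **kwargs)
            return inner
        return wrap

    @synchronized('testlock')
    def foo():
        """Bar"""
        return 42

    assert foo.__doc__ == 'Bar' and foo.__name__ == 'foo'
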
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index ce1c77210..1e634b388 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -20,6 +20,7 @@ Unit Tests for network code
"""
import IPy
import os
+import time
from nova import context
from nova import db
@@ -29,11 +30,153 @@ from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
+from nova.network import linux_net
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
+class IptablesManagerTestCase(test.TestCase):
+ sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*filter',
+ ':INPUT ACCEPT [2223527:305688874]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [2172501:140856656]',
+ ':nova-compute-FORWARD - [0:0]',
+ ':nova-compute-INPUT - [0:0]',
+ ':nova-compute-local - [0:0]',
+ ':nova-compute-OUTPUT - [0:0]',
+ ':nova-filter-top - [0:0]',
+ '-A FORWARD -j nova-filter-top ',
+ '-A OUTPUT -j nova-filter-top ',
+ '-A nova-filter-top -j nova-compute-local ',
+ '-A INPUT -j nova-compute-INPUT ',
+ '-A OUTPUT -j nova-compute-OUTPUT ',
+ '-A FORWARD -j nova-compute-FORWARD ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '-A FORWARD -o virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable ',
+ '-A FORWARD -i virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
+
+ sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [3936:762355]',
+ ':INPUT ACCEPT [2447:225266]',
+ ':OUTPUT ACCEPT [63491:4191863]',
+ ':POSTROUTING ACCEPT [63112:4108641]',
+ ':nova-compute-OUTPUT - [0:0]',
+ ':nova-compute-floating-ip-snat - [0:0]',
+ ':nova-compute-SNATTING - [0:0]',
+ ':nova-compute-PREROUTING - [0:0]',
+ ':nova-compute-POSTROUTING - [0:0]',
+ ':nova-postrouting-bottom - [0:0]',
+ '-A PREROUTING -j nova-compute-PREROUTING ',
+ '-A OUTPUT -j nova-compute-OUTPUT ',
+ '-A POSTROUTING -j nova-compute-POSTROUTING ',
+ '-A POSTROUTING -j nova-postrouting-bottom ',
+ '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
+ '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
+
+ def setUp(self):
+ super(IptablesManagerTestCase, self).setUp()
+ self.manager = linux_net.IptablesManager()
+
+ def test_filter_rules_are_wrapped(self):
+ current_lines = self.sample_filter
+
+ table = self.manager.ipv4['filter']
+ table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table)
+ self.assertTrue('-A run_tests.py-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' in new_lines)
+
+ table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table)
+ self.assertTrue('-A run_tests.py-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' not in new_lines)
+
+ def test_nat_rules(self):
+ current_lines = self.sample_nat
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['nat'])
+
+ for line in [':nova-compute-OUTPUT - [0:0]',
+ ':nova-compute-floating-ip-snat - [0:0]',
+ ':nova-compute-SNATTING - [0:0]',
+ ':nova-compute-PREROUTING - [0:0]',
+ ':nova-compute-POSTROUTING - [0:0]']:
+ self.assertTrue(line in new_lines, "One of nova-compute's chains "
+ "went missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ last_postrouting_line = ''
+
+ for line in new_lines:
+ if line.startswith('-A POSTROUTING'):
+ last_postrouting_line = line
+
+        self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
+                        "Last POSTROUTING rule does not jump to "
+                        "nova-postrouting-bottom: %s" % last_postrouting_line)
+
+ for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
+ self.assertTrue('-A %s -j run_tests.py-%s' \
+ % (chain, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
+
+ def test_filter_rules(self):
+ current_lines = self.sample_filter
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'])
+
+ for line in [':nova-compute-FORWARD - [0:0]',
+ ':nova-compute-INPUT - [0:0]',
+ ':nova-compute-local - [0:0]',
+ ':nova-compute-OUTPUT - [0:0]']:
+ self.assertTrue(line in new_lines, "One of nova-compute's chains"
+ " went missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ for chain in ['FORWARD', 'OUTPUT']:
+ for line in new_lines:
+ if line.startswith('-A %s' % chain):
+ self.assertTrue('-j nova-filter-top' in line,
+ "First %s rule does not "
+ "jump to nova-filter-top" % chain)
+ break
+
+ self.assertTrue('-A nova-filter-top '
+ '-j run_tests.py-local' in new_lines,
+ "nova-filter-top does not jump to wrapped local chain")
+
+ for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
+ self.assertTrue('-A %s -j run_tests.py-%s' \
+ % (chain, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
+
+
class NetworkTestCase(test.TestCase):
"""Test cases for network code"""
def setUp(self):
@@ -321,6 +464,31 @@ class NetworkTestCase(test.TestCase):
network['id'])
self.assertEqual(ip_count, num_available_ips)
+ def test_dhcp_lease_output(self):
+ admin_ctxt = context.get_admin_context()
+ address = self._create_address(0, self.instance_id)
+ lease_ip(address)
+ network_ref = db.network_get_by_instance(admin_ctxt, self.instance_id)
+ leases = linux_net.get_dhcp_leases(context.get_admin_context(),
+ network_ref['id'])
+ for line in leases.split('\n'):
+ seconds, mac, ip, hostname, client_id = line.split(' ')
+            self.assertTrue(int(seconds) > time.time(),
+                            'Lease expiry is in the past')
+ octets = mac.split(':')
+            self.assertEqual(len(octets), 6, "Wrong number of octets "
+                                             "in %s" % (mac,))
+ for octet in octets:
+ self.assertEqual(len(octet), 2, "Oddly sized octet: %s"
+ % (octet,))
+ # This will throw an exception if the octet is invalid
+ int(octet, 16)
+
+ # And this will raise an exception in case of an invalid IP
+ IPy.IP(ip)
+
+ release_ip(address)
+
def is_allocated_in_project(address, project_id):
"""Returns true if address is in specified project"""
@@ -343,13 +511,13 @@ def lease_ip(private_ip):
private_ip)
instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
private_ip)
- cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
- instance_ref['mac_address'],
- private_ip)
+ cmd = (binpath('nova-dhcpbridge'), 'add',
+ instance_ref['mac_address'],
+ private_ip, 'fake')
env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(cmd, addl_env=env)
+ (out, err) = utils.execute(*cmd, addl_env=env)
LOG.debug("ISSUE_IP: %s, %s ", out, err)
@@ -359,11 +527,11 @@ def release_ip(private_ip):
private_ip)
instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
private_ip)
- cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
- instance_ref['mac_address'],
- private_ip)
+ cmd = (binpath('nova-dhcpbridge'), 'del',
+ instance_ref['mac_address'],
+ private_ip, 'fake')
env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(cmd, addl_env=env)
+ (out, err) = utils.execute(*cmd, addl_env=env)
LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 4ecb36b54..c65bc459d 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -20,11 +20,12 @@ from nova import compute
from nova import context
from nova import db
from nova import flags
+from nova import network
from nova import quota
from nova import test
from nova import utils
+from nova import volume
from nova.auth import manager
-from nova.api.ec2 import cloud
from nova.compute import instance_types
@@ -32,6 +33,12 @@ FLAGS = flags.FLAGS
class QuotaTestCase(test.TestCase):
+
+ class StubImageService(object):
+
+ def show(self, *args, **kwargs):
+ return {"properties": {}}
+
def setUp(self):
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
@@ -41,7 +48,6 @@ class QuotaTestCase(test.TestCase):
quota_gigabytes=20,
quota_floating_ips=1)
- self.cloud = cloud.CloudController()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
@@ -57,7 +63,7 @@ class QuotaTestCase(test.TestCase):
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
@@ -118,12 +124,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
- self.assertRaises(quota.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
- image_id='fake')
+ image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -131,12 +137,12 @@ class QuotaTestCase(test.TestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
- self.assertRaises(quota.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
- image_id='fake')
+ image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -145,9 +151,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
- self.assertRaises(quota.QuotaError, self.cloud.create_volume,
- self.context,
- size=10)
+ self.assertRaises(quota.QuotaError,
+ volume.API().create,
+ self.context,
+ size=10,
+ name='',
+ description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
@@ -156,9 +165,11 @@ class QuotaTestCase(test.TestCase):
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError,
- self.cloud.create_volume,
+ volume.API().create,
self.context,
- size=10)
+ size=10,
+ name='',
+ description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
@@ -172,7 +183,8 @@ class QuotaTestCase(test.TestCase):
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
- self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
+ self.assertRaises(quota.QuotaError,
+ network.API().allocate_floating_ip,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
@@ -187,3 +199,67 @@ class QuotaTestCase(test.TestCase):
instance_type='m1.small',
image_id='fake',
metadata=metadata)
+
+ def test_allowed_injected_files(self):
+ self.assertEqual(
+ quota.allowed_injected_files(self.context),
+ FLAGS.quota_max_injected_files)
+
+ def _create_with_injected_files(self, files):
+ api = compute.API(image_service=self.StubImageService())
+ api.create(self.context, min_count=1, max_count=1,
+ instance_type='m1.small', image_id='fake',
+ injected_files=files)
+
+ def test_no_injected_files(self):
+ api = compute.API(image_service=self.StubImageService())
+ api.create(self.context, instance_type='m1.small', image_id='fake')
+
+ def test_max_injected_files(self):
+ files = []
+ for i in xrange(FLAGS.quota_max_injected_files):
+ files.append(('/my/path%d' % i, 'config = test\n'))
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_files(self):
+ files = []
+ for i in xrange(FLAGS.quota_max_injected_files + 1):
+ files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_allowed_injected_file_content_bytes(self):
+ self.assertEqual(
+ quota.allowed_injected_file_content_bytes(self.context),
+ FLAGS.quota_max_injected_file_content_bytes)
+
+ def test_max_injected_file_content_bytes(self):
+ max = FLAGS.quota_max_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max)])
+ files = [('/test/path', content)]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_content_bytes(self):
+ max = FLAGS.quota_max_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max + 1)])
+ files = [('/test/path', content)]
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_allowed_injected_file_path_bytes(self):
+ self.assertEqual(
+ quota.allowed_injected_file_path_bytes(self.context),
+ FLAGS.quota_max_injected_file_path_bytes)
+
+ def test_max_injected_file_path_bytes(self):
+ max = FLAGS.quota_max_injected_file_path_bytes
+ path = ''.join(['a' for i in xrange(max)])
+ files = [(path, 'config = quotatest')]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_path_bytes(self):
+ max = FLAGS.quota_max_injected_file_path_bytes
+ path = ''.join(['a' for i in xrange(max + 1)])
+ files = [(path, 'config = quotatest')]
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
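
The injected-files tests above boil down to three independent limits. A
hypothetical, condensed version of the checks they exercise (function name
and default limits are placeholders, not nova's actual quota code):

    class QuotaError(Exception):
        pass

    def check_injected_files(files, max_files=5, max_path_bytes=255,
                             max_content_bytes=10 * 1024):
        """Reject the request if any of the three limits is exceeded."""
        if len(files) > max_files:
            raise QuotaError('too many injected files')
        for path, content in files:
            if len(path) > max_path_bytes:
                raise QuotaError('injected file path too long')
            if len(content) > max_content_bytes:
                raise QuotaError('injected file contents too large')

    check_injected_files([('/etc/motd', 'hello\n')])   # within all limits
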
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index b6888c4d2..244e43bd9 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -20,10 +20,12 @@ Tests For Scheduler
"""
import datetime
+import mox
from mox import IgnoreArg
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import service
from nova import test
@@ -32,11 +34,14 @@ from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import manager
from nova.scheduler import driver
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('instances_path', 'nova.compute.manager')
class TestDriver(driver.Scheduler):
@@ -54,6 +59,34 @@ class SchedulerTestCase(test.TestCase):
super(SchedulerTestCase, self).setUp()
self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
+ def _create_compute_service(self):
+ """Create compute-manager(ComputeNode and Service record)."""
+ ctxt = context.get_admin_context()
+ dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': 'dummyzone'}
+ s_ref = db.service_create(ctxt, dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': ''}
+ db.compute_node_create(ctxt, dic)
+
+ return db.service_get(ctxt, s_ref['id'])
+
+ def _create_instance(self, **kwargs):
+ """Create a test instance"""
+ ctxt = context.get_admin_context()
+ inst = {}
+ inst['user_id'] = 'admin'
+ inst['project_id'] = kwargs.get('project_id', 'fake')
+ inst['host'] = kwargs.get('host', 'dummy')
+ inst['vcpus'] = kwargs.get('vcpus', 1)
+ inst['memory_mb'] = kwargs.get('memory_mb', 10)
+ inst['local_gb'] = kwargs.get('local_gb', 20)
+ return db.instance_create(ctxt, inst)
+
def test_fallback(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
@@ -76,6 +109,73 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
scheduler.named_method(ctxt, 'topic', num=7)
+    def test_show_host_resources_host_not_exist(self):
+        """The host given as an argument does not exist."""
+
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+
+ try:
+ scheduler.show_host_resources(ctxt, dest)
+ except exception.NotFound, e:
+ c1 = (e.message.find(_("does not exist or is not a "
+ "compute node.")) >= 0)
+ self.assertTrue(c1)
+
+ def _dic_is_equal(self, dic1, dic2, keys=None):
+ """Compares 2 dictionary contents(Helper method)"""
+ if not keys:
+ keys = ['vcpus', 'memory_mb', 'local_gb',
+ 'vcpus_used', 'memory_mb_used', 'local_gb_used']
+
+ for key in keys:
+ if not (dic1[key] == dic2[key]):
+ return False
+ return True
+
+ def test_show_host_resources_no_project(self):
+ """No instance are running on the given host."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ s_ref = self._create_compute_service()
+
+ result = scheduler.show_host_resources(ctxt, s_ref['host'])
+
+ # result checking
+ c1 = ('resource' in result and 'usage' in result)
+ compute_node = s_ref['compute_node'][0]
+ c2 = self._dic_is_equal(result['resource'], compute_node)
+ c3 = result['usage'] == {}
+ self.assertTrue(c1 and c2 and c3)
+ db.service_destroy(ctxt, s_ref['id'])
+
+ def test_show_host_resources_works_correctly(self):
+ """Show_host_resources() works correctly as expected."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ s_ref = self._create_compute_service()
+ i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host'])
+ i_ref2 = self._create_instance(project_id='p-02', vcpus=3,
+ host=s_ref['host'])
+
+ result = scheduler.show_host_resources(ctxt, s_ref['host'])
+
+ c1 = ('resource' in result and 'usage' in result)
+ compute_node = s_ref['compute_node'][0]
+ c2 = self._dic_is_equal(result['resource'], compute_node)
+ c3 = result['usage'].keys() == ['p-01', 'p-02']
+ keys = ['vcpus', 'memory_mb', 'local_gb']
+ c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys)
+ c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys)
+ self.assertTrue(c1 and c2 and c3 and c4 and c5)
+
+ db.service_destroy(ctxt, s_ref['id'])
+ db.instance_destroy(ctxt, i_ref1['id'])
+ db.instance_destroy(ctxt, i_ref2['id'])
+
class ZoneSchedulerTestCase(test.TestCase):
"""Test case for zone scheduler"""
@@ -155,26 +255,235 @@ class SimpleDriverTestCase(test.TestCase):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
+ inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0
- inst['vcpus'] = 1
inst['availability_zone'] = kwargs.get('availability_zone', None)
+ inst['host'] = kwargs.get('host', 'dummy')
+ inst['memory_mb'] = kwargs.get('memory_mb', 20)
+ inst['local_gb'] = kwargs.get('local_gb', 30)
+        inst['launched_on'] = kwargs.get('launched_on', 'dummy')
+ inst['state_description'] = kwargs.get('state_description', 'running')
+ inst['state'] = kwargs.get('state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
"""Create a test volume"""
vol = {}
- vol['image_id'] = 'ami-test'
- vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
+ def _create_compute_service(self, **kwargs):
+ """Create a compute service."""
+
+ dic = {'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': 'dummyzone'}
+ dic['host'] = kwargs.get('host', 'dummy')
+ s_ref = db.service_create(self.context, dic)
+ if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
+ t = datetime.datetime.utcnow() - datetime.timedelta(0)
+ dic['created_at'] = kwargs.get('created_at', t)
+ dic['updated_at'] = kwargs.get('updated_at', t)
+ db.service_update(self.context, s_ref['id'], dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': ''}
+ dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)
+ dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')
+ dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)
+ db.compute_node_create(self.context, dic)
+ return db.service_get(self.context, s_ref['id'])
+
+ def test_doesnt_report_disabled_hosts_as_up(self):
+ """Ensures driver doesn't find hosts before they are enabled"""
+ # NOTE(vish): constructing service without create method
+ # because we are going to use it without queue
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ db.service_update(self.context, s2['id'], {'disabled': True})
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(0, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_reports_enabled_hosts_as_up(self):
+ """Ensures driver can find the hosts that are up"""
+ # NOTE(vish): constructing service without create method
+ # because we are going to use it without queue
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(2, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_instance(self):
+ """Ensures the host with less cores gets the next one"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance()
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual(host, 'host2')
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_specific_host_gets_instance(self):
+ """Ensures if you set availability_zone it launches on that zone"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_wont_schedule_if_specified_host_is_down(self):
+ """Ensures WillNotSchedule is raised when the specified host is down"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
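+ # Push updated_at far enough into the past (beyond
+ # FLAGS.service_down_time) that the scheduler considers host1 down.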
+ now = datetime.datetime.utcnow()
+ delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
+ past = now - delta
+ db.service_update(self.context, s1['id'], {'updated_at': past})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ self.assertRaises(driver.WillNotSchedule,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id2)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_will_schedule_on_disabled_host_if_specified(self):
+ """Ensures a disabled host is still used when explicitly requested"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_too_many_cores(self):
+ """Ensures we don't go over max cores"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_ids1 = []
+ instance_ids2 = []
+ for index in xrange(FLAGS.max_cores):
+ instance_id = self._create_instance()
+ compute1.run_instance(self.context, instance_id)
+ instance_ids1.append(instance_id)
+ instance_id = self._create_instance()
+ compute2.run_instance(self.context, instance_id)
+ instance_ids2.append(instance_id)
+ instance_id = self._create_instance()
+ self.assertRaises(driver.NoValidHost,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id)
+ for instance_id in instance_ids1:
+ compute1.terminate_instance(self.context, instance_id)
+ for instance_id in instance_ids2:
+ compute2.terminate_instance(self.context, instance_id)
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_volume(self):
+ """Ensures the host with less gigabytes gets the next one"""
+ volume1 = service.Service('host1',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume1.start()
+ volume2 = service.Service('host2',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume2.start()
+ volume_id1 = self._create_volume()
+ volume1.create_volume(self.context, volume_id1)
+ volume_id2 = self._create_volume()
+ host = self.scheduler.driver.schedule_create_volume(self.context,
+ volume_id2)
+ self.assertEqual(host, 'host2')
+ volume1.delete_volume(self.context, volume_id1)
+ db.volume_destroy(self.context, volume_id2)
+ volume1.kill()
+ volume2.kill()
+
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
compute1 = self.start_service('compute', host='host1')
@@ -318,3 +627,313 @@ class SimpleDriverTestCase(test.TestCase):
volume2.delete_volume(self.context, volume_id)
volume1.kill()
volume2.kill()
+
+ def test_scheduler_live_migration_with_volume(self):
+ """scheduler_live_migration() works correctly as expected.
+
+ Also, checks instance state is changed from 'running' -> 'migrating'.
+
+ """
+
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ dic = {'instance_id': instance_id, 'size': 1}
+ v_ref = db.volume_create(self.context, dic)
+
+ # Cannot check the 2nd argument because the address of the
+ # instance object differs between calls.
+ driver_i = self.scheduler.driver
+ nocare = mox.IgnoreArg()
+ self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
+ self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
+ driver_i._live_migration_src_check(nocare, nocare)
+ driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
+ driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
+ rpc.cast(self.context,
+ db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
+ {"method": 'live_migration', "args": kwargs})
+
+ self.mox.ReplayAll()
+ self.scheduler.live_migration(self.context, FLAGS.compute_topic,
+ instance_id=instance_id,
+ dest=i_ref['host'])
+
+ i_ref = db.instance_get(self.context, instance_id)
+ self.assertTrue(i_ref['state_description'] == 'migrating')
+ db.instance_destroy(self.context, instance_id)
+ db.volume_destroy(self.context, v_ref['id'])
+
+ def test_live_migration_src_check_instance_not_running(self):
+ """The instance given by instance_id is not running."""
+
+ instance_id = self._create_instance(state_description='migrating')
+ i_ref = db.instance_get(self.context, instance_id)
+
+ try:
+ self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+ except exception.Invalid, e:
+ c = (e.message.find('is not running') > 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+
+ def test_live_migration_src_check_volume_node_not_alive(self):
+ """Raise exception when volume node is not alive."""
+
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ v_ref = db.volume_create(self.context, {'instance_id': instance_id,
+ 'size': 1})
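+ # Register a stale nova-volume service record so the volume node
+ # appears dead to the scheduler.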
+ t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
+ dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
+ 'topic': 'volume', 'report_count': 0}
+ s_ref = db.service_create(self.context, dic)
+
+ try:
+ self.scheduler.driver.schedule_live_migration(self.context,
+ instance_id,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('volume node is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.volume_destroy(self.context, v_ref['id'])
+
+ def test_live_migration_src_check_compute_node_not_alive(self):
+ """Confirms src-compute node is alive."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+ except exception.Invalid, e:
+ c = (e.message.find('is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_src_check_works_correctly(self):
+ """Confirms this method finishes with no error."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host=i_ref['host'])
+
+ ret = self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+
+ self.assertTrue(ret is None)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_not_alive(self):
+ """Confirms exception raises in case dest host does not exist."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_same_host(self):
+ """Confirms exceptioin raises in case dest and src is same host."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('choose other host') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_lack_memory(self):
+ """Confirms exception raises when dest doesn't have enough memory."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere',
+ memory_mb_used=12)
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ 'somewhere')
+ except exception.NotEmpty, e:
+ c = (e.message.find('Unable to migrate') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_works_correctly(self):
+ """Confirms method finishes with no error."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere',
+ memory_mb_used=5)
+
+ ret = self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ 'somewhere')
+ self.assertTrue(ret is None)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_common_check_service_orig_not_exists(self):
+ """Destination host does not exist."""
+
+ dest = 'dummydest'
+ # mocks for live_migration_common_check()
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
+ host=dest)
+
+ # mocks for mounted_on_same_shared_storage()
+ fpath = '/test/20110127120000'
+ self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+ topic = FLAGS.compute_topic
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(self.context, topic, dest),
+ {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
+ {"method": 'check_shared_storage_test_file',
+ "args": {'filename': fpath}})
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, dest),
+ {"method": 'cleanup_shared_storage_test_file',
+ "args": {'filename': fpath}})
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find('does not exist') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_common_check_service_different_hypervisor(self):
+ """Original host and dest host has different hypervisor type."""
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination
+ s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen')
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find(_('Different hypervisor type')) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+ def test_live_migration_common_check_service_different_version(self):
+ """Original host and dest host has different hypervisor version."""
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination
+ s_ref2 = self._create_compute_service(host=dest,
+ hypervisor_version=12002)
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find(_('Older hypervisor version')) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+ def test_live_migration_common_check_checking_cpuinfo_fail(self):
+ """Raise excetion when original host doen't have compatible cpu."""
+
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination
+ s_ref2 = self._create_compute_service(host=dest)
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+ self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
+ rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
+ AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except rpc.RemoteError, e:
+ c = (e.message.find(_("doesn't have compatibility to")) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index b006caadd..d48de2057 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
+from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager",
@@ -260,3 +261,44 @@ class ServiceTestCase(test.TestCase):
serv.report_state()
self.assert_(not serv.model_disconnected)
+
+ def test_compute_can_update_available_resource(self):
+ """Confirm compute updates their record of compute-service table."""
+ host = 'foo'
+ binary = 'nova-compute'
+ topic = 'compute'
+
+ # Mocks do not work without UnsetStubs() here.
+ self.mox.UnsetStubs()
+ ctxt = context.get_admin_context()
+ service_ref = db.service_create(ctxt, {'host': host,
+ 'binary': binary,
+ 'topic': topic})
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.compute.manager.ComputeManager')
+
+ # This test case verifies that update_available_resource is called.
+ # Periodic calls are not needed, so the intervals below are set to 0.
+ serv.report_interval = 0
+ serv.periodic_interval = 0
+
+ # Creating mocks
+ self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ self.mox.StubOutWithMock(serv.manager.driver,
+ 'update_available_resource')
+ serv.manager.driver.update_available_resource(mox.IgnoreArg(), host)
+
+ # Just do start()/stop() without confirming that a new db record is
+ # created, because update_available_resource() only works in a
+ # libvirt environment. This test case confirms that
+ # update_available_resource() is called; otherwise mox complains.
+ self.mox.ReplayAll()
+ serv.start()
+ serv.stop()
+
+ db.service_destroy(ctxt, service_ref['id'])
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index f151ae911..b214f5ce7 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -14,33 +14,125 @@
# License for the specific language governing permissions and limitations
# under the License.
+import eventlet
+import mox
+import os
+import re
+import sys
+
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
from nova.virt import libvirt_conn
+libvirt = None
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
+def _concurrency(wait, done, target):
+ wait.wait()
+ done.send()
+
+
+class CacheConcurrencyTestCase(test.TestCase):
+ def setUp(self):
+ super(CacheConcurrencyTestCase, self).setUp()
+
+ def fake_exists(fname):
+ basedir = os.path.join(FLAGS.instances_path, '_base')
+ if fname == basedir:
+ return True
+ return False
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ self.stubs.Set(os.path, 'exists', fake_exists)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def test_same_fname_concurrency(self):
+ """Ensures that the same fname cache runs at a sequentially"""
+ conn = libvirt_conn.LibvirtConnection
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname', False, wait1, done1)
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname', False, wait2, done2)
+ wait2.send()
+ eventlet.sleep(0)
+ try:
+ self.assertFalse(done2.ready())
+ self.assertTrue('fname' in conn._image_sems)
+ finally:
+ wait1.send()
+ done1.wait()
+ eventlet.sleep(0)
+ self.assertTrue(done2.ready())
+ self.assertFalse('fname' in conn._image_sems)
+
+ def test_different_fname_concurrency(self):
+ """Ensures that two different fname caches are concurrent"""
+ conn = libvirt_conn.LibvirtConnection
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname2', False, wait1, done1)
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname1', False, wait2, done2)
+ wait2.send()
+ eventlet.sleep(0)
+ try:
+ self.assertTrue(done2.ready())
+ finally:
+ wait1.send()
+ eventlet.sleep(0)
+
+
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
+
+ try:
+ pjs = self.manager.get_projects()
+ pjs = [p for p in pjs if p.name == 'fake']
+ if 0 != len(pjs):
+ self.manager.delete_project(pjs[0])
+
+ users = self.manager.get_users()
+ users = [u for u in users if u.name == 'fake']
+ if 0 != len(users):
+ self.manager.delete_user(users[0])
+ except Exception, e:
+ pass
+
+ users = self.manager.get_users()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
+ self.context = context.get_admin_context()
FLAGS.instances_path = ''
+ self.call_libvirt_dependant_setup = False
test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
@@ -52,6 +144,58 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
+ def lazy_load_library_exists(self):
+ """check if libvirt is available."""
+ # try to connect libvirt. if fail, skip test.
+ try:
+ import libvirt
+ import libxml2
+ except ImportError:
+ return False
+ global libvirt
+ libvirt = __import__('libvirt')
+ libvirt_conn.libvirt = __import__('libvirt')
+ libvirt_conn.libxml2 = __import__('libxml2')
+ return True
+
+ def create_fake_libvirt_mock(self, **kwargs):
+ """Defining mocks for LibvirtConnection(libvirt is not used)."""
+
+ # A fake libvirt.virConnect
+ class FakeLibvirtConnection(object):
+ pass
+
+ # A fake libvirt_conn.IptablesFirewallDriver
+ class FakeIptablesFirewallDriver(object):
+
+ def __init__(self, **kwargs):
+ pass
+
+ def setattr(self, key, val):
+ self.__setattr__(key, val)
+
+ # Creating mocks
+ fake = FakeLibvirtConnection()
+ fakeip = FakeIptablesFirewallDriver
+ # Customizing above fake if necessary
+ for key, val in kwargs.items():
+ fake.__setattr__(key, val)
+
+ # Inevitable mocks for libvirt_conn.LibvirtConnection
+ self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
+ libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
+ libvirt_conn.LibvirtConnection._conn = fake
+
+ def create_service(self, **kwargs):
+ service_ref = {'host': kwargs.get('host', 'dummy'),
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': 'zone'}
+
+ return db.service_create(context.get_admin_context(), service_ref)
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -191,8 +335,8 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
- # This test is supposed to make sure we don't override a specifically
- # set uri
+ # This test is supposed to make sure we don't
+ # override a specifically set uri
#
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
@@ -206,6 +350,150 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
+ def test_update_available_resource_works_correctly(self):
+ """Confirm compute_node table is updated successfully."""
+ org_path = FLAGS.instances_path
+ FLAGS.instances_path = '.'
+
+ # Prepare mocks
+ def getVersion():
+ return 12003
+
+ def getType():
+ return 'qemu'
+
+ def listDomainsID():
+ return []
+
+ service_ref = self.create_service(host='dummy')
+ self.create_fake_libvirt_mock(getVersion=getVersion,
+ getType=getType,
+ listDomainsID=listDomainsID)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ 'get_cpu_info')
+ libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.update_available_resource(self.context, 'dummy')
+ service_ref = db.service_get(self.context, service_ref['id'])
+ compute_node = service_ref['compute_node'][0]
+
+ if sys.platform.upper() == 'LINUX2':
+ self.assertTrue(compute_node['vcpus'] >= 0)
+ self.assertTrue(compute_node['memory_mb'] > 0)
+ self.assertTrue(compute_node['local_gb'] > 0)
+ self.assertTrue(compute_node['vcpus_used'] == 0)
+ self.assertTrue(compute_node['memory_mb_used'] > 0)
+ self.assertTrue(compute_node['local_gb_used'] > 0)
+ self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+ self.assertTrue(compute_node['hypervisor_version'] > 0)
+ else:
+ self.assertTrue(compute_node['vcpus'] >= 0)
+ self.assertTrue(compute_node['memory_mb'] == 0)
+ self.assertTrue(compute_node['local_gb'] > 0)
+ self.assertTrue(compute_node['vcpus_used'] == 0)
+ self.assertTrue(compute_node['memory_mb_used'] == 0)
+ self.assertTrue(compute_node['local_gb_used'] > 0)
+ self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+ self.assertTrue(compute_node['hypervisor_version'] > 0)
+
+ db.service_destroy(self.context, service_ref['id'])
+ FLAGS.instances_path = org_path
+
+ def test_update_resource_info_no_compute_record_found(self):
+ """Raise exception if no recorde found on services table."""
+ org_path = FLAGS.instances_path = ''
+ FLAGS.instances_path = '.'
+ self.create_fake_libvirt_mock()
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(exception.Invalid,
+ conn.update_available_resource,
+ self.context, 'dummy')
+
+ FLAGS.instances_path = org_path
+
+ def test_ensure_filtering_rules_for_instance_timeout(self):
+ """ensure_filtering_fules_for_instance() finishes with timeout."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing mocks
+ def fake_none(self):
+ return
+
+ def fake_raise(self):
+ raise libvirt.libvirtError('ERR')
+
+ self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ # Start test
+ self.mox.ReplayAll()
+ try:
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
+ conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
+ conn.ensure_filtering_rules_for_instance(instance_ref)
+ except exception.Error, e:
+ c1 = (0 <= e.message.find('Timeout migrating for'))
+ self.assertTrue(c1)
+
+ db.instance_destroy(self.context, instance_ref['id'])
+
+ def test_live_migration_raises_exception(self):
+ """Confirms recover method is called when exceptions are raised."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing data
+ self.compute = utils.import_object(FLAGS.compute_manager)
+ instance_dict = {'host': 'fake', 'state': power_state.RUNNING,
+ 'state_description': 'running'}
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ instance_ref = db.instance_update(self.context, instance_ref['id'],
+ instance_dict)
+ vol_dict = {'status': 'migrating', 'size': 1}
+ volume_ref = db.volume_create(self.context, vol_dict)
+ db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
+ '/dev/fake')
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI")
+ vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
+ mox.IgnoreArg(),
+ None, FLAGS.live_migration_bandwidth).\
+ AndRaise(libvirt.libvirtError('ERR'))
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref.name:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', '',
+ self.compute.recover_live_migration)
+
+ instance_ref = db.instance_get(self.context, instance_ref['id'])
+ self.assertTrue(instance_ref['state_description'] == 'running')
+ self.assertTrue(instance_ref['state'] == power_state.RUNNING)
+ volume_ref = db.volume_get(self.context, volume_ref['id'])
+ self.assertTrue(volume_ref['status'] == 'in-use')
+
+ db.volume_destroy(self.context, volume_ref['id'])
+ db.instance_destroy(self.context, instance_ref['id'])
+
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
@@ -234,16 +522,22 @@ class IptablesFirewallTestCase(test.TestCase):
self.manager.delete_user(self.user)
super(IptablesFirewallTestCase, self).tearDown()
- in_rules = [
+ in_nat_rules = [
+ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [1170:189210]',
+ ':INPUT ACCEPT [844:71028]',
+ ':OUTPUT ACCEPT [5149:405186]',
+ ':POSTROUTING ACCEPT [5063:386098]',
+ ]
+
+ in_filter_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
- '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
@@ -255,7 +549,7 @@ class IptablesFirewallTestCase(test.TestCase):
'# Completed on Mon Dec 6 11:54:13 2010',
]
- in6_rules = [
+ in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
@@ -315,23 +609,34 @@ class IptablesFirewallTestCase(test.TestCase):
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
# self.fw.add_instance(instance_ref)
- def fake_iptables_execute(cmd, process_input=None):
- if cmd == 'sudo ip6tables-save -t filter':
- return '\n'.join(self.in6_rules), None
- if cmd == 'sudo iptables-save -t filter':
- return '\n'.join(self.in_rules), None
- if cmd == 'sudo iptables-restore':
- self.out_rules = process_input.split('\n')
+ def fake_iptables_execute(*cmd, **kwargs):
+ process_input = kwargs.get('process_input', None)
+ if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
+ return '\n'.join(self.in6_filter_rules), None
+ if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
+ return '\n'.join(self.in_filter_rules), None
+ if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
+ return '\n'.join(self.in_nat_rules), None
+ if cmd == ('sudo', 'iptables-restore'):
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ self.out_rules = lines
return '', ''
- if cmd == 'sudo ip6tables-restore':
- self.out6_rules = process_input.split('\n')
+ if cmd == ('sudo', 'ip6tables-restore'):
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ self.out6_rules = lines
return '', ''
- self.fw.execute = fake_iptables_execute
+ print cmd, kwargs
+
+ from nova.network import linux_net
+ linux_net.iptables_manager.execute = fake_iptables_execute
self.fw.prepare_instance_filter(instance_ref)
self.fw.apply_instance_filter(instance_ref)
- in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
+ in_rules = filter(lambda l: not l.startswith('#'),
+ self.in_filter_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self.out_rules,
@@ -354,17 +659,18 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
- self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
- security_group_chain in self.out_rules,
+ regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
- self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
- '8 -j ACCEPT' % security_group_chain in self.out_rules,
+ regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
+ '--icmp-type 8 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
- self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
- '--dports 80:81 -j ACCEPT' % security_group_chain \
- in self.out_rules,
+ regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
+ '--dports 80:81 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b40ca004b..1b1d72092 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -20,6 +20,8 @@ Tests for Volume Code.
"""
+import cStringIO
+
from nova import context
from nova import exception
from nova import db
@@ -99,7 +101,7 @@ class VolumeTestCase(test.TestCase):
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
@@ -173,3 +175,196 @@ class VolumeTestCase(test.TestCase):
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
+
+
+class DriverTestCase(test.TestCase):
+ """Base Test class for Drivers."""
+ driver_name = "nova.volume.driver.FakeAOEDriver"
+
+ def setUp(self):
+ super(DriverTestCase, self).setUp()
+ self.flags(volume_driver=self.driver_name,
+ logging_default_format_string="%(message)s")
+ self.volume = utils.import_object(FLAGS.volume_manager)
+ self.context = context.get_admin_context()
+ self.output = ""
+
+ def _fake_execute(_command, *_args, **_kwargs):
+ """Fake _execute."""
+ return self.output, None
+ self.volume.driver._execute = _fake_execute
+ self.volume.driver._sync_execute = _fake_execute
+
+ log = logging.getLogger()
+ self.stream = cStringIO.StringIO()
+ log.addHandler(logging.StreamHandler(self.stream))
+
+ inst = {}
+ self.instance_id = db.instance_create(self.context, inst)['id']
+
+ def tearDown(self):
+ super(DriverTestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ return []
+
+ def _detach_volume(self, volume_id_list):
+ """Detach volumes from an instance."""
+ for volume_id in volume_id_list:
+ db.volume_detached(self.context, volume_id)
+ self.volume.delete_volume(self.context, volume_id)
+
+
+class AOETestCase(DriverTestCase):
+ """Test Case for AOEDriver"""
+ driver_name = "nova.volume.driver.AOEDriver"
+
+ def setUp(self):
+ super(AOETestCase, self).setUp()
+
+ def tearDown(self):
+ super(AOETestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ volume_id = db.volume_create(self.context,
+ vol)['id']
+ self.volume.create_volume(self.context, volume_id)
+
+ # each volume has a different mountpoint
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, volume_id, self.instance_id,
+ mountpoint)
+
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
+ volume_id)
+ self.output += "%s %s eth0 /dev/nova-volumes/vol-foo auto run\n" \
+ % (shelf_id, blade_id)
+
+ volume_id_list.append(volume_id)
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_vblade_processes(self):
+ """No log message when all the vblade processes are running."""
+ volume_id_list = self._attach_volume()
+
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_vblade_process_missing(self):
+ """Output a warning message when some vblade processes aren't
+ running."""
+ volume_id_list = self._attach_volume()
+
+ # the first vblade process isn't running
+ self.output = self.output.replace("run", "down", 1)
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
+ volume_id_list[0])
+
+ msg_is_match = False
+ self.stream.truncate(0)
+ try:
+ self.volume.check_for_export(self.context, self.instance_id)
+ except exception.ProcessExecutionError, e:
+ volume_id = volume_id_list[0]
+ msg = _("Cannot confirm exported volume id:%(volume_id)s. "
+ "vblade process for e%(shelf_id)s.%(blade_id)s "
+ "isn't running.") % locals()
+
+ msg_is_match = (0 <= e.message.find(msg))
+
+ self.assertTrue(msg_is_match)
+ self._detach_volume(volume_id_list)
+
+
+class ISCSITestCase(DriverTestCase):
+ """Test Case for ISCSIDriver"""
+ driver_name = "nova.volume.driver.ISCSIDriver"
+
+ def setUp(self):
+ super(ISCSITestCase, self).setUp()
+
+ def tearDown(self):
+ super(ISCSITestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ vol_ref = db.volume_create(self.context, vol)
+ self.volume.create_volume(self.context, vol_ref['id'])
+ vol_ref = db.volume_get(self.context, vol_ref['id'])
+
+ # each volume has a different mountpoint
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, vol_ref['id'], self.instance_id,
+ mountpoint)
+ volume_id_list.append(vol_ref['id'])
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_volume_exported(self):
+ """No log message when all the vblade processes are running."""
+ volume_id_list = self._attach_volume()
+
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
+ for i in volume_id_list:
+ tid = db.volume_get_iscsi_target_num(self.context, i)
+ self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
+ % locals())
+
+ self.stream.truncate(0)
+ self.mox.ReplayAll()
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_some_volume_missing(self):
+ """Output a warning message when some volumes are not recognied
+ by ietd."""
+ volume_id_list = self._attach_volume()
+
+ # the first volume is not exported
+ tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
+ self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
+ % locals()).AndRaise(exception.ProcessExecutionError())
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.volume.check_for_export,
+ self.context,
+ self.instance_id)
+ msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
+ self.assertTrue(0 <= self.stream.getvalue().find(msg))
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index c26dc8639..8b0affd5c 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -18,6 +18,7 @@
Test suite for XenAPI
"""
+import functools
import stubout
from nova import db
@@ -41,6 +42,21 @@ from nova.tests.glance import stubs as glance_stubs
FLAGS = flags.FLAGS
+def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
+ """
+ vm_utils.with_vdi_attached_here needs to be stubbed out because it
+ calls down to the filesystem to attach a vdi. This provides a
+ decorator to handle that.
+ """
+ @functools.wraps(function)
+ def decorated_function(self, *args, **kwargs):
+ orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here
+ vm_utils.with_vdi_attached_here = lambda *x: should_return
+ function(self, *args, **kwargs)
+ vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here
+ return decorated_function
+
+
class XenAPIVolumeTestCase(test.TestCase):
"""
Unit tests for Volume operations
@@ -62,7 +78,7 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
+ 'os_type': 'linux'}
def _create_volume(self, size='0'):
"""Create a volume object."""
@@ -219,7 +235,7 @@ class XenAPIVMTestCase(test.TestCase):
check()
- def check_vm_record(self, conn):
+ def create_vm_record(self, conn, os_type):
instances = conn.list_instances()
self.assertEquals(instances, [1])
@@ -231,28 +247,63 @@ class XenAPIVMTestCase(test.TestCase):
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
+ self.vm_info = vm_info
+ self.vm = vm
+ def check_vm_record(self, conn):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
- self.assertEquals(vm_info['max_mem'], mem_kib)
- self.assertEquals(vm_info['mem'], mem_kib)
- self.assertEquals(vm['memory_static_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
- self.assertEquals(vm['VCPUs_max'], str(vcpus))
- self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
+ self.assertEquals(self.vm_info['max_mem'], mem_kib)
+ self.assertEquals(self.vm_info['mem'], mem_kib)
+ self.assertEquals(self.vm['memory_static_max'], mem_bytes)
+ self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
+ self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
+ self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
+ self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
- self.assertEquals(vm_info['state'], power_state.RUNNING)
+ self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
- self.assertEquals(vm['power_state'], 'Running')
+ self.assertEquals(self.vm['power_state'], 'Running')
+
+ def check_vm_params_for_windows(self):
+ self.assertEquals(self.vm['platform']['nx'], 'true')
+ self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
+ self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
+
+ # check that these are not set
+ self.assertEquals(self.vm['PV_args'], '')
+ self.assertEquals(self.vm['PV_bootloader'], '')
+ self.assertEquals(self.vm['PV_kernel'], '')
+ self.assertEquals(self.vm['PV_ramdisk'], '')
+
+ def check_vm_params_for_linux(self):
+ self.assertEquals(self.vm['platform']['nx'], 'false')
+ self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
+ self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
+
+ # check that these are not set
+ self.assertEquals(self.vm['PV_kernel'], '')
+ self.assertEquals(self.vm['PV_ramdisk'], '')
+ self.assertEquals(self.vm['HVM_boot_params'], {})
+ self.assertEquals(self.vm['HVM_boot_policy'], '')
+
+ def check_vm_params_for_linux_with_external_kernel(self):
+ self.assertEquals(self.vm['platform']['nx'], 'false')
+ self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
+ self.assertNotEquals(self.vm['PV_kernel'], '')
+ self.assertNotEquals(self.vm['PV_ramdisk'], '')
+
+ # check that these are not set
+ self.assertEquals(self.vm['HVM_boot_params'], {})
+ self.assertEquals(self.vm['HVM_boot_policy'], '')
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
- instance_type="m1.large"):
+ instance_type="m1.large", os_type="linux"):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
values = {'name': 1,
'id': 1,
@@ -263,10 +314,12 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id,
'instance_type': instance_type,
'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
+ 'os_type': os_type}
+
conn = xenapi_conn.get_connection(False)
instance = db.instance_create(values)
conn.spawn(instance)
+ self.create_vm_record(conn, os_type)
self.check_vm_record(conn)
def test_spawn_not_enough_memory(self):
@@ -283,24 +336,37 @@ class XenAPIVMTestCase(test.TestCase):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, 2, 3)
+ @stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+ self.check_vm_params_for_linux()
- def test_spawn_vhd_glance(self):
+ def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+ os_type="linux")
+ self.check_vm_params_for_linux()
+
+ def test_spawn_vhd_glance_windows(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+ os_type="windows")
+ self.check_vm_params_for_windows()
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
+ self.check_vm_params_for_linux_with_external_kernel()
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ self.vm_info = None
+ self.vm = None
self.stubs.UnsetAll()
def _create_instance(self):
@@ -314,7 +380,8 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
- 'mac_address': 'aa:bb:cc:dd:ee:ff'}
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'}
instance = db.instance_create(values)
self.conn.spawn(instance)
return instance
@@ -372,7 +439,8 @@ class XenAPIMigrateInstance(test.TestCase):
'ramdisk_id': None,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
+ 'os_type': 'linux'}
+
stubs.stub_out_migration_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
@@ -410,6 +478,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
+ self.fake_instance.os_type = 'linux'
def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(