From 63d799a5ac6172b73708a183f3d952a2c8b53c2b Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 3 Mar 2011 11:56:21 -0600 Subject: Basic notifications drivers and tests --- nova/flags.py | 3 +++ nova/notifier/__init__.py | 19 +++++++++++++++++++ nova/notifier/no_op_notifier.py | 19 +++++++++++++++++++ nova/notifier/rabbit_notifier.py | 24 +++++++++++++++++++++++ nova/tests/test_notifier.py | 41 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 106 insertions(+) create mode 100644 nova/notifier/__init__.py create mode 100644 nova/notifier/no_op_notifier.py create mode 100644 nova/notifier/rabbit_notifier.py create mode 100644 nova/tests/test_notifier.py (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 8cf199b2f..7b4723b50 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -354,3 +354,6 @@ DEFINE_string('host', socket.gethostname(), DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') + +DEFINE_string('notification_driver', 'nova.notifier.no_op_driver.NoopDriver', + 'Default driver for sending notifications') diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py new file mode 100644 index 000000000..3bf60cba9 --- /dev/null +++ b/nova/notifier/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +def notify(model): + """Sends a notification using the specified driver""" + driver = FLAGS.notification_driver + driver.notify(model) diff --git a/nova/notifier/no_op_notifier.py b/nova/notifier/no_op_notifier.py new file mode 100644 index 000000000..c0d418564 --- /dev/null +++ b/nova/notifier/no_op_notifier.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class NoopNotifier(object): + def notify(self, model): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py new file mode 100644 index 000000000..7a5802fb9 --- /dev/null +++ b/nova/notifier/rabbit_notifier.py @@ -0,0 +1,24 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class RabbitNotifier(object): + """Sends notifications to a specific RabbitMQ server and topic""" + + def __init__(self): + pass + + def notify(self, model): + """Sends a notification to the RabbitMQ""" + pass diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py new file mode 100644 index 000000000..831ae8bf3 --- /dev/null +++ b/nova/tests/test_notifier.py @@ -0,0 +1,41 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import notifier +from nova import test + +import stubout + +class NotifierTestCase(test.TestCase): + """Test case for notifications""" + def setUp(self): + super(NotifierTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): + self.stubs.UnsetAll() + super(NotifierTestCase, self).tearDown() + + def test_send_notification(self): + self.notify_called = False + def mock_notify(self, model): + self.notify_called = True + + self.stubs.set(nova.notifier.no_op_notifier.NoopNotifier, 'notify', + mock_notify) + + model = dict(x=1, y=2) + notifier.notify(model) + self.assertEqual(True, self.notify_called) -- cgit From cff74a76e6369989e8006aa9d7c20fde14b31952 Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Fri, 4 Mar 2011 19:24:55 +0000 Subject: More unit tests and rabbit hooks --- nova/flags.py | 3 ++- nova/notifier/__init__.py | 11 ++++++++--- nova/notifier/no_op_notifier.py | 2 +- nova/notifier/rabbit_notifier.py | 23 ++++++++++++++++++----- nova/tests/test_notifier.py | 29 ++++++++++++++++++++++++----- 5 files changed, 53 insertions(+), 15 deletions(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 7b4723b50..c2259433f 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -355,5 +355,6 @@ DEFINE_string('host', socket.gethostname(), DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') -DEFINE_string('notification_driver', 'nova.notifier.no_op_driver.NoopDriver', +DEFINE_string('notification_driver', + 'nova.notifier.no_op_notifier.NoopNotifier', 'Default driver for sending notifications') diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py index 3bf60cba9..8053b8a0a 100644 --- a/nova/notifier/__init__.py +++ b/nova/notifier/__init__.py @@ -13,7 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. 
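As a minimal usage sketch of the reworked interface in the hunk below: the event name and the fake model are illustrative assumptions; only the notify(event_name, model) call and the flag-selected driver come from this patch.

    from nova import notifier

    class FakeInstance(object):
        def __init__(self):
            self.id = 1
            self.state = 'running'

    # The driver is loaded from FLAGS.notification_driver; the default
    # NoopNotifier discards the event, while RabbitNotifier JSON-encodes
    # {'event_name': ..., 'model': model.__dict__} and casts it onto the
    # FLAGS.notification_topic queue ('notifications').
    notifier.notify('compute.instance.create', FakeInstance())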
-def notify(model): +from nova import flags +from nova import utils + +FLAGS = flags.FLAGS + +def notify(event_name, model): """Sends a notification using the specified driver""" - driver = FLAGS.notification_driver - driver.notify(model) + driver = utils.import_class(FLAGS.notification_driver)() + driver.notify(event_name, model) diff --git a/nova/notifier/no_op_notifier.py b/nova/notifier/no_op_notifier.py index c0d418564..3fefe6f8f 100644 --- a/nova/notifier/no_op_notifier.py +++ b/nova/notifier/no_op_notifier.py @@ -14,6 +14,6 @@ # under the License. class NoopNotifier(object): - def notify(self, model): + def notify(self, event_name, model): """Notifies the recipient of the desired event given the model""" pass diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py index 7a5802fb9..33cf06566 100644 --- a/nova/notifier/rabbit_notifier.py +++ b/nova/notifier/rabbit_notifier.py @@ -13,12 +13,25 @@ # License for the specific language governing permissions and limitations # under the License. +import json + +import nova.context + +from nova import flags +from nova import rpc + +FLAGS = flags.FLAGS + +flags.DEFINE_string('notification_topic', 'notifications', + 'RabbitMQ topic used for Nova notifications') + class RabbitNotifier(object): """Sends notifications to a specific RabbitMQ server and topic""" + pass - def __init__(self): - pass - - def notify(self, model): + def notify(self, event_name, model): """Sends a notification to the RabbitMQ""" - pass + context = nova.context.get_admin_context() + topic = FLAGS.notification_topic + msg = { 'event_name': event_name, 'model': model.__dict__ } + rpc.cast(context, topic, json.dumps(msg)) diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 831ae8bf3..4d6289e6a 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,7 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. 
+import nova + +from nova import flags from nova import notifier +from nova.notifier import no_op_notifier from nova import test import stubout @@ -30,12 +34,27 @@ class NotifierTestCase(test.TestCase): def test_send_notification(self): self.notify_called = False - def mock_notify(self, model): + def mock_notify(cls, *args): self.notify_called = True - self.stubs.set(nova.notifier.no_op_notifier.NoopNotifier, 'notify', + self.stubs.Set(nova.notifier.no_op_notifier.NoopNotifier, 'notify', mock_notify) - model = dict(x=1, y=2) - notifier.notify(model) - self.assertEqual(True, self.notify_called) + class Mock(object): + pass + notifier.notify('derp', Mock()) + self.assertEqual(self.notify_called, True) + + def test_send_rabbit_notification(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier.RabbitNotifier') + self.mock_cast = False + def mock_cast(cls, *args): + self.mock_cast = True + + class Mock(object): + pass + self.stubs.Set(nova.rpc, 'cast', mock_cast) + notifier.notify('derp', Mock()) + + self.assertEqual(self.mock_cast, True) -- cgit From 5a9d2eb44ced0affe143e6274c9c9326f1c2d7da Mon Sep 17 00:00:00 2001 From: John Tran Date: Fri, 18 Mar 2011 11:49:11 -0700 Subject: created api endpoint to allow uploading of public key --- nova/api/ec2/cloud.py | 12 ++++++++++++ nova/tests/test_cloud.py | 16 ++++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cadda97db..6fe01b0e9 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -302,6 +302,18 @@ class CloudController(object): 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer an object, pass it here + def import_public_key(self, context, key_name, public_key, + fingerprint=None): + LOG.audit(_("Import key %s"), key_name, context=context) + key = {} + key['user_id'] = context.user_id + key['name'] = key_name + key['public_key'] = public_key + if fingerprint: + key['fingerprint'] = fingerprint + db.key_pair_create(context, key) + return True + def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index cf8ee7eff..03b1ad2fc 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -279,6 +279,22 @@ class CloudTestCase(test.TestCase): self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) + def test_import_public_key(self): + result = self.cloud.import_public_key(self.context, + 'testimportkey', 'mytestpubkey', 'mytestfprint') + self.assertTrue(result) + keydata = db.key_pair_get(self.context, + self.context.user.id, + 'testimportkey') + print "PUBLIC_KEY:" + file = open('/tmp/blah', 'w') + file.write(keydata['public_key']) + file.close() + print keydata['public_key'] + self.assertEqual('mytestpubkey', keydata['public_key']) + self.assertEqual('mytestfprint', keydata['fingerprint']) + self.assertTrue(1) + def test_delete_key_pair(self): self._create_key('test') self.cloud.delete_key_pair(self.context, 'test') -- cgit From 15a40f842cb8a4362fbd82e36e3df4af7ab46a84 Mon Sep 17 00:00:00 2001 From: John Tran Date: Fri, 18 Mar 2011 12:17:40 -0700 Subject: cleaned up tests stubs that were accidentally checked in --- nova/tests/test_cloud.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_cloud.py 
b/nova/tests/test_cloud.py index 03b1ad2fc..3a266c996 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -281,19 +281,15 @@ class CloudTestCase(test.TestCase): def test_import_public_key(self): result = self.cloud.import_public_key(self.context, - 'testimportkey', 'mytestpubkey', 'mytestfprint') + 'testimportkey', + 'mytestpubkey', + 'mytestfprint') self.assertTrue(result) keydata = db.key_pair_get(self.context, self.context.user.id, 'testimportkey') - print "PUBLIC_KEY:" - file = open('/tmp/blah', 'w') - file.write(keydata['public_key']) - file.close() - print keydata['public_key'] self.assertEqual('mytestpubkey', keydata['public_key']) self.assertEqual('mytestfprint', keydata['fingerprint']) - self.assertTrue(1) def test_delete_key_pair(self): self._create_key('test') -- cgit From a105fd449a0b91cde3ab86cc552705dfe50e3f6d Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 21 Mar 2011 14:35:19 -0700 Subject: if fingerprint data not provided, added logic to calculate it using the pub key. --- nova/api/ec2/cloud.py | 15 +++++++++++++-- nova/tests/public_key/dummy.fingerprint | 1 + nova/tests/public_key/dummy.pub | 1 + nova/tests/test_cloud.py | 32 +++++++++++++++++++++++++------- 4 files changed, 40 insertions(+), 9 deletions(-) create mode 100644 nova/tests/public_key/dummy.fingerprint create mode 100644 nova/tests/public_key/dummy.pub (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6fe01b0e9..8ec74fbe0 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -27,6 +27,8 @@ import datetime import IPy import os import urllib +import tempfile +import shutil from nova import compute from nova import context @@ -309,8 +311,17 @@ class CloudController(object): key['user_id'] = context.user_id key['name'] = key_name key['public_key'] = public_key - if fingerprint: - key['fingerprint'] = fingerprint + if fingerprint is None: + tmpdir = tempfile.mkdtemp() + pubfile = os.path.join(tmpdir, 'temp.pub') + fh = open(pubfile, 'w') + fh.write(public_key) + fh.close() + (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', + '%s' % (pubfile)) + fingerprint = out.split(' ')[1] + shutil.rmtree(tmpdir) + key['fingerprint'] = fingerprint db.key_pair_create(context, key) return True diff --git a/nova/tests/public_key/dummy.fingerprint b/nova/tests/public_key/dummy.fingerprint new file mode 100644 index 000000000..715bca27a --- /dev/null +++ b/nova/tests/public_key/dummy.fingerprint @@ -0,0 +1 @@ +1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df diff --git a/nova/tests/public_key/dummy.pub b/nova/tests/public_key/dummy.pub new file mode 100644 index 000000000..d4cf2bc0d --- /dev/null +++ b/nova/tests/public_key/dummy.pub @@ -0,0 +1 @@ +ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 3a266c996..c49a39ed0 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -280,16 +280,34 @@ class CloudTestCase(test.TestCase): 
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) def test_import_public_key(self): - result = self.cloud.import_public_key(self.context, - 'testimportkey', - 'mytestpubkey', - 'mytestfprint') - self.assertTrue(result) + # test when user provides all values + result1 = self.cloud.import_public_key(self.context, + 'testimportkey1', + 'mytestpubkey', + 'mytestfprint') + self.assertTrue(result1) keydata = db.key_pair_get(self.context, - self.context.user.id, - 'testimportkey') + self.context.user.id, + 'testimportkey1') self.assertEqual('mytestpubkey', keydata['public_key']) self.assertEqual('mytestfprint', keydata['fingerprint']) + # test when user omits fingerprint + pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key') + f = open(pubkey_path + '/dummy.pub', 'r') + dummypub = f.readline().rstrip() + f.close + f = open(pubkey_path + '/dummy.fingerprint', 'r') + dummyfprint = f.readline().rstrip() + f.close + result2 = self.cloud.import_public_key(self.context, + 'testimportkey2', + dummypub) + self.assertTrue(result2) + keydata = db.key_pair_get(self.context, + self.context.user.id, + 'testimportkey2') + self.assertEqual(dummypub, keydata['public_key']) + self.assertEqual(dummyfprint, keydata['fingerprint']) def test_delete_key_pair(self): self._create_key('test') -- cgit From 062301faf57d1e07b5068ae90c91c8c7da460e1f Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 28 Mar 2011 09:28:18 -0700 Subject: Start up nova-api service on an unused port if 0 is specified. Fixes bug 744150 --- nova/service.py | 6 ++++++ nova/tests/integrated/integrated_helpers.py | 3 ++- nova/tests/integrated/test_login.py | 1 + nova/wsgi.py | 2 ++ 4 files changed, 11 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/service.py b/nova/service.py index 47c0b96c0..e399273a0 100644 --- a/nova/service.py +++ b/nova/service.py @@ -248,6 +248,12 @@ class WsgiService(object): def wait(self): self.wsgi_app.wait() + def get_port(self, api): + for i in xrange(len(self.apis)): + if self.apis[i] == api: + return self.wsgi_app.ports[i] + return None + class ApiService(WsgiService): """Class for our nova-api service""" diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index cc7326e73..752563e89 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -141,6 +141,7 @@ class IntegratedUnitTestContext(object): self.api_service = api_service - self.auth_url = 'http://localhost:8774/v1.0' + host, port = api_service.get_port('osapi') + self.auth_url = 'http://%s:%s/v1.0' % (host, port) return api_service diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py index 6b241f240..764f3326d 100644 --- a/nova/tests/integrated/test_login.py +++ b/nova/tests/integrated/test_login.py @@ -33,6 +33,7 @@ FLAGS.verbose = True class LoginTest(test.TestCase): def setUp(self): super(LoginTest, self).setUp() + self.flags(ec2_listen_port=0, osapi_listen_port=0) self.context = integrated_helpers.IntegratedUnitTestContext() self.user = self.context.test_user self.api = self.user.openstack_api diff --git a/nova/wsgi.py b/nova/wsgi.py index ba0819466..54401f998 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -61,6 +61,7 @@ class Server(object): def __init__(self, threads=1000): self.pool = eventlet.GreenPool(threads) + self.ports = [] def start(self, application, port, host='0.0.0.0', backlog=128): """Run a WSGI server with the given application.""" @@ -68,6 +69,7 @@ 
class Server(object): logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals()) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) + self.ports.append(socket.getsockname()) def wait(self): """Wait until all servers have completed running.""" -- cgit From 131b7da40946b12bae59ebcc8f1c3d66d0cb5cff Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 30 Mar 2011 09:04:29 -0700 Subject: Store socket_info as a dictionary rather than an array --- nova/service.py | 14 +++++++------- nova/tests/integrated/integrated_helpers.py | 2 +- nova/wsgi.py | 7 ++++--- 3 files changed, 12 insertions(+), 11 deletions(-) (limited to 'nova') diff --git a/nova/service.py b/nova/service.py index e399273a0..1405da605 100644 --- a/nova/service.py +++ b/nova/service.py @@ -248,11 +248,9 @@ class WsgiService(object): def wait(self): self.wsgi_app.wait() - def get_port(self, api): - for i in xrange(len(self.apis)): - if self.apis[i] == api: - return self.wsgi_app.ports[i] - return None + def get_socket_info(self, api_name): + """Returns the (host, port) that an API was started on.""" + return self.wsgi_app.socket_info[api_name] class ApiService(WsgiService): @@ -331,8 +329,10 @@ def _run_wsgi(paste_config_file, apis): logging.debug(_("App Config: %(api)s\n%(config)r") % locals()) logging.info(_("Running %s API"), api) app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_listen_port" % api), - getattr(FLAGS, "%s_listen" % api))) + apps.append((app, + getattr(FLAGS, "%s_listen_port" % api), + getattr(FLAGS, "%s_listen" % api), + api)) if len(apps) == 0: logging.error(_("No known API applications configured in %s."), paste_config_file) diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 5169dcb2e..14b2e06c6 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -176,7 +176,7 @@ class _IntegratedTestBase(test.TestCase): self.api_service = api_service - host, port = api_service.get_port('osapi') + host, port = api_service.get_socket_info('osapi') self.auth_url = 'http://%s:%s/v1.0' % (host, port) def tearDown(self): diff --git a/nova/wsgi.py b/nova/wsgi.py index 54401f998..2487ada22 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -61,15 +61,16 @@ class Server(object): def __init__(self, threads=1000): self.pool = eventlet.GreenPool(threads) - self.ports = [] + self.socket_info = {} - def start(self, application, port, host='0.0.0.0', backlog=128): + def start(self, application, port, host='0.0.0.0', key=None, backlog=128): """Run a WSGI server with the given application.""" arg0 = sys.argv[0] logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals()) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) - self.ports.append(socket.getsockname()) + if key: + self.socket_info[key] = socket.getsockname() def wait(self): """Wait until all servers have completed running.""" -- cgit From 4ab6962fb7461573119297aa3508f7df8c6efa42 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 30 Mar 2011 09:08:36 -0700 Subject: Fixed mis-merge: OS API version still has to be v1.1 --- nova/tests/integrated/integrated_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 14b2e06c6..bc98921f0 100644 --- 
a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -177,7 +177,7 @@ class _IntegratedTestBase(test.TestCase): self.api_service = api_service host, port = api_service.get_socket_info('osapi') - self.auth_url = 'http://%s:%s/v1.0' % (host, port) + self.auth_url = 'http://%s:%s/v1.1' % (host, port) def tearDown(self): self.context.cleanup() -- cgit From d3de6cd1b2997e495a000b998b321346e2a75306 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Fri, 8 Apr 2011 14:46:26 -0700 Subject: Fixes euca-attach-volume for iscsi using Xenserver Minor changes required to xenapi functions to get correct format for volume-id, iscsi-host, etc. --- nova/virt/xenapi/volume_utils.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 72284ac02..27964cac0 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -209,9 +209,9 @@ def _get_volume_id(path_or_id): # see compute/manager->setup_compute_volume volume_id = path_or_id[path_or_id.find('/vol-') + 1:] if volume_id == path_or_id: - volume_id = path_or_id[path_or_id.find('-vol-') + 1:] - volume_id = volume_id.replace('--', '-') - return volume_id + volume_id = path_or_id[path_or_id.find('-volume--') + 1:] + volume_id = volume_id.replace('volume--', '') + return int(volume_id) def _get_target_host(iscsi_string): @@ -244,25 +244,21 @@ def _get_target(volume_id): Gets iscsi name and portal from volume name and host. For this method to work the following are needed: 1) volume_ref['host'] must resolve to something rather than loopback - 2) ietd must bind only to the address as resolved above - If any of the two conditions are not met, fall back on Flags. 
""" - volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), + volume_ref = db.volume_get(context.get_admin_context(), volume_id) result = (None, None) try: - (r, _e) = utils.execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % - volume_ref['host']) + (r, _e) = utils.execute('sudo', 'iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', volume_ref['host']) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: - targets = r.splitlines() - if len(_e) == 0 and len(targets) == 1: - for target in targets: - if volume_id in target: + volume_name = "volume-%08x" % volume_id + for target in r.splitlines(): + if FLAGS.iscsi_ip_prefix in target and volume_name in target: (location, _sep, iscsi_name) = target.partition(" ") break - iscsi_portal = location.split(",")[0] - result = (iscsi_name, iscsi_portal) + iscsi_portal = location.split(",")[0] + result = (iscsi_name, iscsi_portal) return result -- cgit From 12ec5f5c0d6a88779780b15b6ef38a016d6aae4a Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 12 Apr 2011 08:04:55 -0700 Subject: Add new flag 'max_kernel_ramdisk_size' to specify a maximum size of kernel or ramdisk so we don't copy large files to dom0 and fill up /boot/guest --- nova/virt/xenapi/vm_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d2045a557..dd1fd9383 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -51,6 +51,8 @@ FLAGS = flags.FLAGS flags.DEFINE_string('default_os_type', 'linux', 'Default OS type') flags.DEFINE_integer('block_device_creation_timeout', 10, 'time to wait for a block device to be created') +flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024, + 'maximum size in bytes of kernel or ramdisk images') XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -448,6 +450,12 @@ class VMHelper(HelperBase): if image_type == ImageType.DISK: # Make room for MBR. 
vdi_size += MBR_SIZE_BYTES + elif image_type == ImageType.KERNEL_RAMDISK and \ + vdi_size > FLAGS.max_kernel_ramdisk_size: + max_size = FLAGS.max_kernel_ramdisk_size + raise exception.Error( + _("Kernel/Ramdisk image is too large, %(vdi_size)d bytes " + "(max %(max_size)d bytes)") % locals()) name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) -- cgit From bc953f37560b7353b9b8c86e8d0bdaa5672d3acd Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Tue, 12 Apr 2011 15:20:30 -0700 Subject: Minor fixes --- nova/virt/xenapi/volume_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 27964cac0..819c48be5 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -250,7 +250,7 @@ def _get_target(volume_id): result = (None, None) try: (r, _e) = utils.execute('sudo', 'iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', volume_ref['host']) + '-t', 'sendtargets', '-p', volume_ref['host']) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: -- cgit From c04b0caca4a725be390271be30bf8a034aa5ca9d Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 13 Apr 2011 10:10:40 -0700 Subject: Minor formatting cleanup --- nova/virt/xenapi/vm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index dd1fd9383..32a617ef4 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -454,8 +454,8 @@ class VMHelper(HelperBase): vdi_size > FLAGS.max_kernel_ramdisk_size: max_size = FLAGS.max_kernel_ramdisk_size raise exception.Error( - _("Kernel/Ramdisk image is too large, %(vdi_size)d bytes " - "(max %(max_size)d bytes)") % locals()) + _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, " + "max %(max_size)d bytes") % locals()) name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) -- cgit From 8c66d79a41044837a0865b1a706dd89e788597d1 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 14 Apr 2011 20:57:11 +0900 Subject: add kvm-pause and kvm-suspend --- nova/virt/libvirt_conn.py | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 6ec15fbb8..66f43e786 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -545,19 +545,54 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): - raise exception.ApiError("pause not supported for libvirt.") + """Pause VM instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.suspend() + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.suspend() @exception.wrap_exception def unpause(self, instance, callback): - raise exception.ApiError("unpause not supported for libvirt.") + """Unpause paused VM instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.resume() + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.resume() @exception.wrap_exception def suspend(self, instance, callback): - raise exception.ApiError("suspend not supported for libvirt") 
+ """Suspend the specified instance""" + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + dom.managedSave(0) + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.managedSave(0) @exception.wrap_exception def resume(self, instance, callback): - raise exception.ApiError("resume not supported for libvirt") + """resume the specified instance""" + try: + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance.name) + tmpconn.close() + else: + dom = self._conn.lookupByName(instance.name) + dom.create() + except libvirt.LibvirtError: + xml = self.to_xml(instance, None) + self._create_new_domain(xml) @exception.wrap_exception def rescue(self, instance, callback=None): -- cgit From aacb64391a8d9802365746308f9ece8e73dc9dae Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 15 Apr 2011 17:32:15 -0500 Subject: Rename the id --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 39d7af9c1..839b84790 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -550,7 +550,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_type = self.db.instance_type_get_by_flavor_id(context, migration_ref['new_flavor_id']) self.db.instance_update(context, instance_id, - dict(instance_type=instance_type['name'], + dict(instance_type_id=instance_type['id'], memory_mb=instance_type['memory_mb'], vcpus=instance_type['vcpus'], local_gb=instance_type['local_gb'])) -- cgit From 0ba085928c75f2fc27fb03eaa3aaeff6618e8875 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:48:26 +0900 Subject: Add support for creating a snapshot of a nova volume with euca-create-snapshot. 
--- nova/api/ec2/__init__.py | 6 ++ nova/api/ec2/cloud.py | 52 ++++++++++++--- nova/db/api.py | 39 +++++++++++ nova/db/sqlalchemy/api.py | 77 ++++++++++++++++++++++ .../versions/015_add_volume_snapshot_support.py | 71 ++++++++++++++++++++ nova/db/sqlalchemy/models.py | 24 +++++++ nova/exception.py | 50 ++++++++++++++ nova/volume/api.py | 44 +++++++++++++ nova/volume/driver.py | 8 +++ nova/volume/manager.py | 42 ++++++++++++ 10 files changed, 405 insertions(+), 8 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py (limited to 'nova') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index cd59340bd..4a49a5a6b 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -327,6 +327,12 @@ class Executor(wsgi.Application): ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x') message = _('Volume %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) + except exception.SnapshotNotFound as ex: + LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), + context=context) + ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x') + message = _('Snapshot %s not found') % ec2_id + return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 092b80fa2..f5360af0b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -280,14 +280,46 @@ class CloudController(object): owner=None, restorable_by=None, **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} + if snapshot_id: + snapshots = [] + for ec2_id in snapshot_id: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshots.append(snapshot) + else: + snapshots = self.volume_api.get_all_snapshots(context) + snapshots = [self._format_snapshot(context, s) for s in snapshots] + return {'snapshotSet': snapshots} + + def _format_snapshot(self, context, snapshot): + s = {} + s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['status'] = snapshot['status'] + s['startTime'] = snapshot['created_at'] + s['progress'] = snapshot['progress'] + s['ownerId'] = snapshot['project_id'] + s['volumeSize'] = snapshot['volume_size'] + s['description'] = snapshot['display_description'] + + s['display_name'] = snapshot['display_name'] + s['display_description'] = snapshot['display_description'] + return s + + def create_snapshot(self, context, volume_id, **kwargs): + LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + volume_id = ec2utils.ec2_id_to_id(volume_id) + snapshot = self.volume_api.create_snapshot( + context, + volume_id=volume_id, + name=kwargs.get('display_name'), + description=kwargs.get('display_description')) + return {'snapshotSet': [self._format_snapshot(context, snapshot)]} + + def delete_snapshot(self, context, snapshot_id, **kwargs): + snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) + self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) + return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = 
db.key_pair_get_all_by_user(context, context.user_id) @@ -595,6 +627,10 @@ class CloudController(object): 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] + if volume.get('snapshot_id') != None: + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + else: + v['snapshotId'] = None v['display_name'] = volume['display_name'] v['display_description'] = volume['display_description'] diff --git a/nova/db/api.py b/nova/db/api.py index f9a4b5b4b..57e585a9c 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -47,6 +47,8 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') +flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', + 'Template string to be used to generate instance names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -871,6 +873,43 @@ def volume_update(context, volume_id, values): #################### +def snapshot_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a volume or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all volumes.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all volumes belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on an snapshot and update it. + + Raises NotFound if snapshot does not exist. 
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +#################### + + def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 285b22a04..ebdb2ad5c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1758,6 +1758,83 @@ def volume_update(context, volume_id, values): ################### +@require_context +def snapshot_create(context, values): + snapshot_ref = models.Snapshot() + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + return snapshot_ref + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.Snapshot).\ + filter_by(project_id=context.project_id).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, + snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + session = get_session() + return session.query(models.Snapshot).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.Snapshot).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + + +################### + + @require_context def security_group_get_all(context): session = get_session() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ snapshots.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 36a084a1d..2e0ead5f9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -327,6 +327,30 @@ class Quota(BASE, NovaBase): metadata_items = Column(Integer) +class Snapshot(BASE, NovaBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'snapshots' + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(Integer) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/exception.py b/nova/exception.py index 9905fb19b..2dffeb795 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -60,6 +60,56 @@ class ApiError(Error): class BuildInProgress(Error): + super(ApiError, self).__init__('%s: %s' % (code, message)) + + +class NotFound(Error): + pass + + +class InstanceNotFound(NotFound): + def __init__(self, message, instance_id): + self.instance_id = instance_id + super(InstanceNotFound, self).__init__(message) + + +class VolumeNotFound(NotFound): + def __init__(self, message, volume_id): + self.volume_id = volume_id + super(VolumeNotFound, self).__init__(message) + + +class SnapshotNotFound(NotFound): + def __init__(self, message, snapshot_id): + self.snapshot_id = snapshot_id + super(SnapshotNotFound, self).__init__(message) + + +class Duplicate(Error): + pass + + +class NotAuthorized(Error): + pass + + +class NotEmpty(Error): + pass + + +class Invalid(Error): + pass + + +class InvalidInputException(Error): + pass + + +class InvalidContentType(Error): + pass + + +class TimeoutException(Error): pass diff --git a/nova/volume/api.py b/nova/volume/api.py index 09befb647..c1af30de0 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -90,6 +90,15 @@ class API(base.Base): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) + def get_snapshot(self, context, snapshot_id): + rv = self.db.snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context): + if context.is_admin: + return self.db.snapshot_get_all(context) + return self.db.snapshot_get_all_by_project(context, context.project_id) + def check_attach(self, context, volume_id): volume = self.get(context, volume_id) # TODO(vish): abstract status checking? 
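The hunk that follows hands the actual work to the volume host by casting to the scheduler topic; the message it emits has roughly this shape (ids illustrative, 'volume' being the usual value of FLAGS.volume_topic):

    # Sent via rpc.cast(context, FLAGS.scheduler_topic, msg) in the
    # create_snapshot() added below; delete_snapshot() sends the same kind
    # of message with only snapshot_id in its args.
    msg = {"method": "create_snapshot",
           "args": {"topic": "volume",
                    "volume_id": 2,
                    "snapshot_id": 1}}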
@@ -110,3 +119,38 @@ class API(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "remove_volume", "args": {'volume_id': volume_id}}) + + def create_snapshot(self, context, volume_id, name, description): + volume = self.get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + + options = { + 'volume_id': volume_id, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description} + + snapshot = self.db.snapshot_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_snapshot", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume_id, + "snapshot_id": snapshot['id']}}) + return snapshot + + def delete_snapshot(self, context, snapshot_id): + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'}) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "delete_snapshot", + "args": {"topic": FLAGS.volume_topic, + "snapshot_id": snapshot_id}}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 55307ad9b..31998e307 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,6 +122,14 @@ class VolumeDriver(object): (FLAGS.volume_group, volume['name'])) + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + raise NotImplementedError() + def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 2178389ce..87fd3bf17 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -152,6 +152,48 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + + try: + snap_name = snapshot_ref['name'] + LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + model_update = self.driver.create_snapshot(snapshot_ref) + if model_update: + self.db.snapshot_update(context, snapshot_ref['id'], model_update) + + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'error'}) + raise + + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'available', + 'progress': '100%'}) + LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name']) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + self.driver.delete_snapshot(snapshot_ref) + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + raise + + self.db.snapshot_destroy(context, snapshot_id) + LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + return True + 
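The base VolumeDriver hooks added above (create_snapshot/delete_snapshot in nova/volume/driver.py) are left as NotImplementedError. Purely as an illustration, an LVM-backed driver could fill them in along these lines, reusing the volume_group flag and the list-form utils.execute seen elsewhere in this series; the exact command layout is assumed, not taken from the patch.

    from nova import flags
    from nova import utils

    FLAGS = flags.FLAGS

    class LVMSnapshotMixin(object):
        def create_snapshot(self, snapshot):
            # snapshot['name'] and snapshot['volume_name'] come from the
            # Snapshot model properties added in models.py above.
            utils.execute('sudo', 'lvcreate', '-L',
                          '%dG' % snapshot['volume_size'],
                          '--snapshot', '--name', snapshot['name'],
                          '/dev/%s/%s' % (FLAGS.volume_group,
                                          snapshot['volume_name']))

        def delete_snapshot(self, snapshot):
            utils.execute('sudo', 'lvremove', '-f',
                          '/dev/%s/%s' % (FLAGS.volume_group,
                                          snapshot['name']))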
def setup_compute_volume(self, context, volume_id): """Setup remote volume on compute host. -- cgit From dcda6be23c3797872c406f58578b05befd378c97 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:48:26 +0900 Subject: Add support for creating a snapshot of a nova volume with euca-create-snapshot. --- nova/api/ec2/__init__.py | 6 ++ nova/api/ec2/cloud.py | 52 ++++++++++++--- nova/db/api.py | 39 +++++++++++ nova/db/sqlalchemy/api.py | 77 ++++++++++++++++++++++ .../versions/015_add_volume_snapshot_support.py | 71 ++++++++++++++++++++ nova/db/sqlalchemy/models.py | 24 +++++++ nova/exception.py | 6 ++ nova/volume/api.py | 44 +++++++++++++ nova/volume/driver.py | 8 +++ nova/volume/manager.py | 42 ++++++++++++ 10 files changed, 361 insertions(+), 8 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py (limited to 'nova') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a3c3b25a1..a89d65a38 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -331,6 +331,12 @@ class Executor(wsgi.Application): ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x') message = _('Volume %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) + except exception.SnapshotNotFound as ex: + LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), + context=context) + ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x') + message = _('Snapshot %s not found') % ec2_id + return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index bd4c9dcd4..6daf299b9 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -283,14 +283,46 @@ class CloudController(object): owner=None, restorable_by=None, **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} + if snapshot_id: + snapshots = [] + for ec2_id in snapshot_id: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshots.append(snapshot) + else: + snapshots = self.volume_api.get_all_snapshots(context) + snapshots = [self._format_snapshot(context, s) for s in snapshots] + return {'snapshotSet': snapshots} + + def _format_snapshot(self, context, snapshot): + s = {} + s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['status'] = snapshot['status'] + s['startTime'] = snapshot['created_at'] + s['progress'] = snapshot['progress'] + s['ownerId'] = snapshot['project_id'] + s['volumeSize'] = snapshot['volume_size'] + s['description'] = snapshot['display_description'] + + s['display_name'] = snapshot['display_name'] + s['display_description'] = snapshot['display_description'] + return s + + def create_snapshot(self, context, volume_id, **kwargs): + LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + volume_id = ec2utils.ec2_id_to_id(volume_id) + snapshot = self.volume_api.create_snapshot( + context, + volume_id=volume_id, + name=kwargs.get('display_name'), + description=kwargs.get('display_description')) + return {'snapshotSet': 
[self._format_snapshot(context, snapshot)]} + + def delete_snapshot(self, context, snapshot_id, **kwargs): + snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) + self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) + return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) @@ -598,6 +630,10 @@ class CloudController(object): 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] + if volume.get('snapshot_id') != None: + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + else: + v['snapshotId'] = None v['display_name'] = volume['display_name'] v['display_description'] = volume['display_description'] diff --git a/nova/db/api.py b/nova/db/api.py index 63901e94d..9fc4b8c0a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -46,6 +46,8 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') +flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', + 'Template string to be used to generate instance names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -867,6 +869,43 @@ def volume_update(context, volume_id, values): #################### +def snapshot_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a volume or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all volumes.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all volumes belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on an snapshot and update it. + + Raises NotFound if snapshot does not exist. 
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +#################### + + def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 646675a45..059a22cb9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1760,6 +1760,83 @@ def volume_update(context, volume_id, values): ################### +@require_context +def snapshot_create(context, values): + snapshot_ref = models.Snapshot() + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + return snapshot_ref + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.Snapshot).\ + filter_by(project_id=context.project_id).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, + snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + session = get_session() + return session.query(models.Snapshot).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.Snapshot).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + + +################### + + @require_context def security_group_get_all(context): session = get_session() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ snapshots.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f79d0f16c..9abe4d9ae 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -327,6 +327,30 @@ class Quota(BASE, NovaBase): metadata_items = Column(Integer) +class Snapshot(BASE, NovaBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'snapshots' + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(Integer) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/exception.py b/nova/exception.py index 4e2bbdbaf..7adc3d007 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -68,6 +68,12 @@ class VolumeNotFound(NotFound): super(VolumeNotFound, self).__init__(message) +class SnapshotNotFound(NotFound): + def __init__(self, message, snapshot_id): + self.snapshot_id = snapshot_id + super(SnapshotNotFound, self).__init__(message) + + class Duplicate(Error): pass diff --git a/nova/volume/api.py b/nova/volume/api.py index 4b4bb9dc5..f5285f31f 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -90,6 +90,15 @@ class API(base.Base): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) + def get_snapshot(self, context, snapshot_id): + rv = self.db.snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context): + if context.is_admin: + return self.db.snapshot_get_all(context) + return self.db.snapshot_get_all_by_project(context, context.project_id) + def check_attach(self, context, volume_id): volume = self.get(context, volume_id) # TODO(vish): abstract status checking? @@ -103,3 +112,38 @@ class API(base.Base): # TODO(vish): abstract status checking? 
if volume['status'] == "available": raise exception.ApiError(_("Volume is already detached")) + + def create_snapshot(self, context, volume_id, name, description): + volume = self.get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + + options = { + 'volume_id': volume_id, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description} + + snapshot = self.db.snapshot_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_snapshot", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume_id, + "snapshot_id": snapshot['id']}}) + return snapshot + + def delete_snapshot(self, context, snapshot_id): + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'}) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "delete_snapshot", + "args": {"topic": FLAGS.volume_topic, + "snapshot_id": snapshot_id}}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 55307ad9b..31998e307 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,6 +122,14 @@ class VolumeDriver(object): (FLAGS.volume_group, volume['name'])) + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + raise NotImplementedError() + def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 2178389ce..87fd3bf17 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -152,6 +152,48 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + + try: + snap_name = snapshot_ref['name'] + LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + model_update = self.driver.create_snapshot(snapshot_ref) + if model_update: + self.db.snapshot_update(context, snapshot_ref['id'], model_update) + + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'error'}) + raise + + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'available', + 'progress': '100%'}) + LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name']) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + self.driver.delete_snapshot(snapshot_ref) + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + raise + + self.db.snapshot_destroy(context, snapshot_id) + LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + return True + def setup_compute_volume(self, context, volume_id): """Setup remote 
volume on compute host. -- cgit From f76f2ee50f2407155a0aaefac3224e6af14e7d26 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:50:10 +0900 Subject: Add support for creating a Sheepdog snapshot. --- nova/volume/driver.py | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 31998e307..ba0a7efef 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,16 @@ class SheepdogDriver(VolumeDriver): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) + def create_snapshot(self, snapshot): + """Creates a sheepdog snapshot""" + self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'], + "sheepdog:%s" % snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes a sheepdog snapshot""" + self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'], + '-s', snapshot['name']) + def local_path(self, volume): return "sheepdog:%s" % volume['name'] -- cgit From aad857a18153792d96f300732c3bb5bb16aa02c3 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:50:10 +0900 Subject: Add support for creating a Sheepdog snapshot. --- nova/volume/driver.py | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 31998e307..ba0a7efef 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,16 @@ class SheepdogDriver(VolumeDriver): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) + def create_snapshot(self, snapshot): + """Creates a sheepdog snapshot""" + self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'], + "sheepdog:%s" % snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes a sheepdog snapshot""" + self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'], + '-s', snapshot['name']) + def local_path(self, volume): return "sheepdog:%s" % volume['name'] -- cgit From 2f3819628b6d3dea13a56ea6e93e02992b2e1f5f Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:01:25 +0900 Subject: Add support for creating a new volume from a existing snapshot with EC2 API. 
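
This change lets the EC2 CreateVolume call take an optional SnapshotId. The EC2 layer converts the 'snap-...' identifier to the internal id, volume.api.API.create() records it on the new volume and takes the size from the snapshot (which must be in the 'available' state), and the scheduler cast forwards snapshot_id so the volume manager can build the volume from the snapshot instead of allocating a blank one. A rough caller-side sketch of the new signature follows; the admin context and the snapshot id 42 are illustrative only, not taken from this patch:

    from nova import context
    from nova.volume import api as volume_api

    ctxt = context.get_admin_context()
    # size is superseded by the snapshot's volume_size when snapshot_id
    # is given; snapshot 42 must already be in the 'available' state
    vol = volume_api.API().create(ctxt,
                                  size=None,
                                  snapshot_id=42,
                                  name='restored',
                                  description='volume built from snapshot 42')
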
--- nova/api/ec2/cloud.py | 12 +++++- .../versions/016_add_snapshot_id_to_volumes.py | 48 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 + nova/volume/api.py | 12 +++++- nova/volume/driver.py | 4 ++ nova/volume/manager.py | 9 +++- 6 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index f5360af0b..aa15539ac 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -636,11 +636,19 @@ class CloudController(object): v['display_description'] = volume['display_description'] return v - def create_volume(self, context, size, **kwargs): - LOG.audit(_("Create volume of %s GB"), size, context=context) + def create_volume(self, context, **kwargs): + size = kwargs.get('size'); + if kwargs.get('snapshot_id') != None: + snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + else: + snapshot_id = None + LOG.audit(_("Create volume of %s GB"), size, context=context) + volume = self.volume_api.create( context, size=size, + snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 2e0ead5f9..ca762ca9f 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -287,6 +287,8 @@ class Volume(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) + snapshot_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
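
The snapshot_id column added to the Volume model above holds the internal snapshot id; at the EC2 layer it is rendered with the same 'snap-%08x' template used when volumes are formatted for the EC2 API earlier in this series, so an internal id of 42 is reported to clients as snap-0000002a. A tiny illustration of that mapping (the values are made up):

    'snap-%08x' % 42       # -> 'snap-0000002a', as exposed to EC2 clients
    int('0000002a', 16)    # -> 42, roughly what ec2utils.ec2_id_to_id recovers
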
diff --git a/nova/volume/api.py b/nova/volume/api.py index c1af30de0..7fa80383b 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -39,7 +39,13 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, name, description): + def create(self, context, size, snapshot_id, name, description): + if snapshot_id != None: + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + size = snapshot['volume_size'] + if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" @@ -51,6 +57,7 @@ class API(base.Base): 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, + 'snapshot_id': snapshot_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", 'attach_status': "detached", @@ -62,7 +69,8 @@ class API(base.Base): FLAGS.scheduler_topic, {"method": "create_volume", "args": {"topic": FLAGS.volume_topic, - "volume_id": volume['id']}}) + "volume_id": volume['id'], + "snapshot_id": snapshot_id}}) return volume def delete(self, context, volume_id): diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..02b0d50f4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -101,6 +101,10 @@ class VolumeDriver(object): volume['name'], FLAGS.volume_group) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + raise NotImplementedError() + def delete_volume(self, volume): """Deletes a logical volume.""" try: diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..7d47fc191 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id): + def create_volume(self, context, volume_id, snapshot_id): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) @@ -108,7 +108,12 @@ class VolumeManager(manager.SchedulerDependentManager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - model_update = self.driver.create_volume(volume_ref) + if snapshot_id == None: + model_update = self.driver.create_volume(volume_ref) + else: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot(volume_ref, + snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From 1018a60e3194e7e283cd89af28efd689623058a8 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:01:25 +0900 Subject: Add support for creating a new volume from a existing snapshot with EC2 API. 
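
On the worker side, VolumeManager.create_volume() now receives the snapshot_id carried in the scheduler cast and picks the driver entry point accordingly. A minimal sketch of that dispatch, written to match the manager.py hunk below (error handling and logging omitted):

    # inside VolumeManager.create_volume (simplified)
    if snapshot_id is None:
        model_update = self.driver.create_volume(volume_ref)
    else:
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        model_update = self.driver.create_volume_from_snapshot(volume_ref,
                                                               snapshot_ref)
    if model_update:
        self.db.volume_update(context, volume_ref['id'], model_update)
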
--- nova/api/ec2/cloud.py | 12 +++++- .../versions/016_add_snapshot_id_to_volumes.py | 48 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 + nova/volume/api.py | 12 +++++- nova/volume/driver.py | 4 ++ nova/volume/manager.py | 9 +++- 6 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6daf299b9..5d4d2ad27 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -639,11 +639,19 @@ class CloudController(object): v['display_description'] = volume['display_description'] return v - def create_volume(self, context, size, **kwargs): - LOG.audit(_("Create volume of %s GB"), size, context=context) + def create_volume(self, context, **kwargs): + size = kwargs.get('size'); + if kwargs.get('snapshot_id') != None: + snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + else: + snapshot_id = None + LOG.audit(_("Create volume of %s GB"), size, context=context) + volume = self.volume_api.create( context, size=size, + snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 9abe4d9ae..afc2ea4e4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -287,6 +287,8 @@ class Volume(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) + snapshot_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? 
diff --git a/nova/volume/api.py b/nova/volume/api.py index f5285f31f..bd073964d 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -39,7 +39,13 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, name, description): + def create(self, context, size, snapshot_id, name, description): + if snapshot_id != None: + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + size = snapshot['volume_size'] + if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" @@ -51,6 +57,7 @@ class API(base.Base): 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, + 'snapshot_id': snapshot_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", 'attach_status': "detached", @@ -62,7 +69,8 @@ class API(base.Base): FLAGS.scheduler_topic, {"method": "create_volume", "args": {"topic": FLAGS.volume_topic, - "volume_id": volume['id']}}) + "volume_id": volume['id'], + "snapshot_id": snapshot_id}}) return volume def delete(self, context, volume_id): diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..02b0d50f4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -101,6 +101,10 @@ class VolumeDriver(object): volume['name'], FLAGS.volume_group) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + raise NotImplementedError() + def delete_volume(self, volume): """Deletes a logical volume.""" try: diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..7d47fc191 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id): + def create_volume(self, context, volume_id, snapshot_id): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) @@ -108,7 +108,12 @@ class VolumeManager(manager.SchedulerDependentManager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - model_update = self.driver.create_volume(volume_ref) + if snapshot_id == None: + model_update = self.driver.create_volume(volume_ref) + else: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot(volume_ref, + snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From 1c7c53a9f40a88eb9def7ab9d706e7399ad5e65b Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:02:00 +0900 Subject: Add support for cloning a Sheepdog volume. 
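
Sheepdog cloning is delegated to qemu-img, which creates the new VDI as a copy-on-write image backed by the snapshot. With the volume-%08x and snapshot-%08x name templates defined earlier in this series, the argument list assembled by create_volume_from_snapshot looks roughly like this (the ids are illustrative):

    # cloning volume 2 from snapshot 3, which was taken from volume 1
    args = ('qemu-img', 'create',
            '-b', 'sheepdog:volume-00000001:snapshot-00000003',
            'sheepdog:volume-00000002')
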
--- nova/volume/driver.py | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 02b0d50f4..3f3caf37a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,13 @@ class SheepdogDriver(VolumeDriver): "sheepdog:%s" % volume['name'], sizestr) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s" % volume['name']) + + def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) -- cgit From 5b670fe9bca9103642967bce609853704d0d1b88 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 21:02:00 +0900 Subject: Add support for cloning a Sheepdog volume. --- nova/volume/driver.py | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 02b0d50f4..3f3caf37a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,13 @@ class SheepdogDriver(VolumeDriver): "sheepdog:%s" % volume['name'], sizestr) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s" % volume['name']) + + def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) -- cgit From 4eac8d2c4252eb866e99ef260c0c5d7df1d927d2 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 22 Apr 2011 12:47:09 -0400 Subject: Created new libvirt directory, moved libvirt_conn.py to libvirt/connection.py, moved libvirt templates, broke out firewall and network utilities. 
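
The libvirt support code now lives in a nova/virt/libvirt/ package: the connection class moves to nova.virt.libvirt.connection, the firewall drivers to nova.virt.libvirt.firewall, the network helpers to nova.virt.libvirt.netutils, and the XML templates move alongside them. Out-of-tree code that imported the old module needs the new paths; roughly:

    # before this change
    from nova.virt import libvirt_conn
    conn = libvirt_conn.LibvirtConnection(read_only=False)

    # after this change
    from nova.virt.libvirt import connection
    from nova.virt.libvirt import firewall
    conn = connection.LibvirtConnection(read_only=False)
    fw = firewall.IptablesFirewallDriver(get_connection=lambda: conn)
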
--- nova/tests/test_virt.py | 47 +- nova/virt/connection.py | 2 +- nova/virt/cpuinfo.xml.template | 9 - nova/virt/libvirt.xml.template | 122 -- nova/virt/libvirt/__init__.py | 0 nova/virt/libvirt/connection.py | 1527 ++++++++++++++++++++++ nova/virt/libvirt/cpuinfo.xml.template | 9 + nova/virt/libvirt/firewall.py | 630 ++++++++++ nova/virt/libvirt/libvirt.xml.template | 122 ++ nova/virt/libvirt/netutils.py | 95 ++ nova/virt/libvirt_conn.py | 2168 -------------------------------- 11 files changed, 2408 insertions(+), 2323 deletions(-) delete mode 100644 nova/virt/cpuinfo.xml.template delete mode 100644 nova/virt/libvirt.xml.template create mode 100644 nova/virt/libvirt/__init__.py create mode 100644 nova/virt/libvirt/connection.py create mode 100644 nova/virt/libvirt/cpuinfo.xml.template create mode 100644 nova/virt/libvirt/firewall.py create mode 100644 nova/virt/libvirt/libvirt.xml.template create mode 100644 nova/virt/libvirt/netutils.py delete mode 100644 nova/virt/libvirt_conn.py (limited to 'nova') diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 0a0c7a958..d770f2c11 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -34,7 +34,8 @@ from nova.auth import manager from nova.compute import manager as compute_manager from nova.compute import power_state from nova.db.sqlalchemy import models -from nova.virt import libvirt_conn +from nova.virt.libvirt import connection +from nova.virt.libvirt import firewall libvirt = None FLAGS = flags.FLAGS @@ -64,7 +65,7 @@ class CacheConcurrencyTestCase(test.TestCase): def test_same_fname_concurrency(self): """Ensures that the same fname cache runs at a sequentially""" - conn = libvirt_conn.LibvirtConnection + conn = connection.LibvirtConnection wait1 = eventlet.event.Event() done1 = eventlet.event.Event() eventlet.spawn(conn._cache_image, _concurrency, @@ -85,7 +86,7 @@ class CacheConcurrencyTestCase(test.TestCase): def test_different_fname_concurrency(self): """Ensures that two different fname caches are concurrent""" - conn = libvirt_conn.LibvirtConnection + conn = connection.LibvirtConnection wait1 = eventlet.event.Event() done1 = eventlet.event.Event() eventlet.spawn(conn._cache_image, _concurrency, @@ -106,7 +107,7 @@ class CacheConcurrencyTestCase(test.TestCase): class LibvirtConnTestCase(test.TestCase): def setUp(self): super(LibvirtConnTestCase, self).setUp() - libvirt_conn._late_load_cheetah() + connection._late_load_cheetah() self.flags(fake_call=True) self.manager = manager.AuthManager() @@ -152,8 +153,8 @@ class LibvirtConnTestCase(test.TestCase): return False global libvirt libvirt = __import__('libvirt') - libvirt_conn.libvirt = __import__('libvirt') - libvirt_conn.libxml2 = __import__('libxml2') + connection.libvirt = __import__('libvirt') + connection.libxml2 = __import__('libxml2') return True def create_fake_libvirt_mock(self, **kwargs): @@ -163,7 +164,7 @@ class LibvirtConnTestCase(test.TestCase): class FakeLibvirtConnection(object): pass - # A fake libvirt_conn.IptablesFirewallDriver + # A fake connection.IptablesFirewallDriver class FakeIptablesFirewallDriver(object): def __init__(self, **kwargs): @@ -179,11 +180,11 @@ class LibvirtConnTestCase(test.TestCase): for key, val in kwargs.items(): fake.__setattr__(key, val) - # Inevitable mocks for libvirt_conn.LibvirtConnection - self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class') - libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - 
libvirt_conn.LibvirtConnection._conn = fake + # Inevitable mocks for connection.LibvirtConnection + self.mox.StubOutWithMock(connection.utils, 'import_class') + connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn = fake def create_service(self, **kwargs): service_ref = {'host': kwargs.get('host', 'dummy'), @@ -247,7 +248,7 @@ class LibvirtConnTestCase(test.TestCase): 'instance_id': instance_ref['id']}) self.flags(libvirt_type='lxc') - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') @@ -359,7 +360,7 @@ class LibvirtConnTestCase(test.TestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, expected_uri) @@ -386,7 +387,7 @@ class LibvirtConnTestCase(test.TestCase): FLAGS.libvirt_uri = testuri for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) @@ -410,13 +411,13 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock(getVersion=getVersion, getType=getType, listDomainsID=listDomainsID) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + self.mox.StubOutWithMock(connection.LibvirtConnection, 'get_cpu_info') - libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') + connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') # Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.update_available_resource(self.context, 'dummy') service_ref = db.service_get(self.context, service_ref['id']) compute_node = service_ref['compute_node'][0] @@ -450,7 +451,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) self.assertRaises(exception.Invalid, conn.update_available_resource, self.context, 'dummy') @@ -485,7 +486,7 @@ class LibvirtConnTestCase(test.TestCase): # Start test self.mox.ReplayAll() try: - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('instance_filter_exists', fake_none) @@ -534,7 +535,7 @@ class LibvirtConnTestCase(test.TestCase): # Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn._live_migration, self.context, instance_ref, 'dest', '', @@ -569,7 +570,7 @@ class IptablesFirewallTestCase(test.TestCase): class FakeLibvirtConnection(object): pass self.fake_libvirt_connection = FakeLibvirtConnection() - self.fw = libvirt_conn.IptablesFirewallDriver( + self.fw = firewall.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) def tearDown(self): @@ -746,7 +747,7 @@ class NWFilterTestCase(test.TestCase): self.fake_libvirt_connection = Mock() - self.fw = 
libvirt_conn.NWFilterFirewall( + self.fw = firewall.NWFilterFirewall( lambda: self.fake_libvirt_connection) def tearDown(self): diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 99a8849f1..aeec17c98 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -27,9 +27,9 @@ from nova import utils from nova.virt import driver from nova.virt import fake from nova.virt import hyperv -from nova.virt import libvirt_conn from nova.virt import vmwareapi_conn from nova.virt import xenapi_conn +from nova.virt.libvirt import connection as libvirt_conn LOG = logging.getLogger("nova.virt.connection") diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template deleted file mode 100644 index 48842b29d..000000000 --- a/nova/virt/cpuinfo.xml.template +++ /dev/null @@ -1,9 +0,0 @@ - - $arch - $model - $vendor - -#for $var in $features - -#end for - diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template deleted file mode 100644 index de2497a76..000000000 --- a/nova/virt/libvirt.xml.template +++ /dev/null @@ -1,122 +0,0 @@ - - ${name} - ${memory_kb} - -#if $type == 'lxc' - #set $disk_prefix = '' - #set $disk_bus = '' - exe - /sbin/init -#else if $type == 'uml' - #set $disk_prefix = 'ubd' - #set $disk_bus = 'uml' - uml - /usr/bin/linux - /dev/ubda -#else - #if $type == 'xen' - #set $disk_prefix = 'sd' - #set $disk_bus = 'scsi' - linux - /dev/xvda - #else - #set $disk_prefix = 'vd' - #set $disk_bus = 'virtio' - hvm - #end if - #if $getVar('rescue', False) - ${basepath}/kernel.rescue - ${basepath}/ramdisk.rescue - #else - #if $getVar('kernel', None) - ${kernel} - #if $type == 'xen' - ro - #else - root=/dev/vda console=ttyS0 - #end if - #if $getVar('ramdisk', None) - ${ramdisk} - #end if - #else - - #end if - #end if -#end if - - - - - ${vcpus} - -#if $type == 'lxc' - - - - -#else - #if $getVar('rescue', False) - - - - - - - - - - - #else - - - - - - #if $getVar('local', False) - - - - - - #end if - #end if -#end if - -#for $nic in $nics - - - - - - - -#if $getVar('nic.extra_params', False) - ${nic.extra_params} -#end if -#if $getVar('nic.gateway_v6', False) - -#end if - - -#end for - - - - - - - - - - - - - - - - -#if $getVar('vncserver_host', False) - -#end if - - diff --git a/nova/virt/libvirt/__init__.py b/nova/virt/libvirt/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py new file mode 100644 index 000000000..972ac1bb9 --- /dev/null +++ b/nova/virt/libvirt/connection.py @@ -0,0 +1,1527 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to a hypervisor through libvirt. + +Supports KVM, LXC, QEMU, UML, and XEN. + +**Related Flags** + +:libvirt_type: Libvirt domain type. 
Can be kvm, qemu, uml, xen + (default: kvm). +:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). +:libvirt_xml_template: Libvirt XML Template. +:rescue_image_id: Rescue ami image (default: ami-rescue). +:rescue_kernel_id: Rescue aki image (default: aki-rescue). +:rescue_ramdisk_id: Rescue ari image (default: ari-rescue). +:injected_network_template: Template file for injected network +:allow_project_net_traffic: Whether to allow in project network traffic + +""" + +import multiprocessing +import os +import random +import shutil +import subprocess +import sys +import tempfile +import time +import uuid +from xml.dom import minidom +from xml.etree import ElementTree + +from eventlet import greenthread +from eventlet import tpool + +import IPy + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils +from nova import vnc +from nova.auth import manager +from nova.compute import instance_types +from nova.compute import power_state +from nova.virt import disk +from nova.virt import driver +from nova.virt import images +from nova.virt.libvirt import netutils + + +libvirt = None +libxml2 = None +Template = None + + +LOG = logging.getLogger('nova.virt.libvirt_conn') + + +FLAGS = flags.FLAGS +flags.DECLARE('live_migration_retry_count', 'nova.compute.manager') +# TODO(vish): These flags should probably go into a shared location +flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') +flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') +flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') +flags.DEFINE_string('libvirt_xml_template', + utils.abspath('virt/libvirt/libvirt.xml.template'), + 'Libvirt XML Template') +flags.DEFINE_string('libvirt_type', + 'kvm', + 'Libvirt domain type (valid options are: ' + 'kvm, lxc, qemu, uml, xen)') +flags.DEFINE_string('libvirt_uri', + '', + 'Override the default libvirt URI (which is dependent' + ' on libvirt_type)') +flags.DEFINE_bool('allow_project_net_traffic', + True, + 'Whether to allow in project network traffic') +flags.DEFINE_bool('use_cow_images', + True, + 'Whether to use cow images') +flags.DEFINE_string('ajaxterm_portrange', + '10000-12000', + 'Range of ports that ajaxterm should randomly try to bind') +flags.DEFINE_string('firewall_driver', + 'nova.virt.libvirt.firewall.IptablesFirewallDriver', + 'Firewall driver (defaults to iptables)') +flags.DEFINE_string('cpuinfo_xml_template', + utils.abspath('virt/libvirt/cpuinfo.xml.template'), + 'CpuInfo XML Template (Used only live migration now)') +flags.DEFINE_string('live_migration_uri', + "qemu+tcp://%s/system", + 'Define protocol used by live_migration feature') +flags.DEFINE_string('live_migration_flag', + "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", + 'Define live migration behavior.') +flags.DEFINE_integer('live_migration_bandwidth', 0, + 'Define live migration behavior') +flags.DEFINE_string('qemu_img', 'qemu-img', + 'binary to use for qemu-img commands') +flags.DEFINE_bool('start_guests_on_host_boot', False, + 'Whether to restart guests when the host reboots') + + +def get_connection(read_only): + # These are loaded late so that there's no need to install these + # libraries when not using libvirt. + # Cheetah is separate because the unit tests want to load Cheetah, + # but not libvirt. 
+ global libvirt + global libxml2 + if libvirt is None: + libvirt = __import__('libvirt') + if libxml2 is None: + libxml2 = __import__('libxml2') + _late_load_cheetah() + return LibvirtConnection(read_only) + + +def _late_load_cheetah(): + global Template + if Template is None: + t = __import__('Cheetah.Template', globals(), locals(), + ['Template'], -1) + Template = t.Template + + +class LibvirtConnection(driver.ComputeDriver): + + def __init__(self, read_only): + super(LibvirtConnection, self).__init__() + self.libvirt_uri = self.get_uri() + + self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() + self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() + self._wrapped_conn = None + self.read_only = read_only + + fw_class = utils.import_class(FLAGS.firewall_driver) + self.firewall_driver = fw_class(get_connection=self._get_connection) + + def init_host(self, host): + # Adopt existing VM's running here + ctxt = context.get_admin_context() + for instance in db.instance_get_all_by_host(ctxt, host): + try: + LOG.debug(_('Checking state of %s'), instance['name']) + state = self.get_info(instance['name'])['state'] + except exception.NotFound: + state = power_state.SHUTOFF + + LOG.debug(_('Current state of %(name)s was %(state)s.'), + {'name': instance['name'], 'state': state}) + db.instance_set_state(ctxt, instance['id'], state) + + # NOTE(justinsb): We no longer delete SHUTOFF instances, + # the user may want to power them back on + + if state != power_state.RUNNING: + continue + self.firewall_driver.prepare_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance) + + def _get_connection(self): + if not self._wrapped_conn or not self._test_connection(): + LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri) + self._wrapped_conn = self._connect(self.libvirt_uri, + self.read_only) + return self._wrapped_conn + _conn = property(_get_connection) + + def _test_connection(self): + try: + self._wrapped_conn.getInfo() + return True + except libvirt.libvirtError as e: + if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ + e.get_error_domain() == libvirt.VIR_FROM_REMOTE: + LOG.debug(_('Connection to libvirt broke')) + return False + raise + + def get_uri(self): + if FLAGS.libvirt_type == 'uml': + uri = FLAGS.libvirt_uri or 'uml:///system' + elif FLAGS.libvirt_type == 'xen': + uri = FLAGS.libvirt_uri or 'xen:///' + elif FLAGS.libvirt_type == 'lxc': + uri = FLAGS.libvirt_uri or 'lxc:///' + else: + uri = FLAGS.libvirt_uri or 'qemu:///system' + return uri + + def _connect(self, uri, read_only): + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] + + if read_only: + return libvirt.openReadOnly(uri) + else: + return libvirt.openAuth(uri, auth, 0) + + def list_instances(self): + return [self._conn.lookupByID(x).name() + for x in self._conn.listDomainsID()] + + def _map_to_instance_info(self, domain): + """Gets info from a virsh domain object into an InstanceInfo""" + + # domain.info() returns a list of: + # state: one of the state values (virDomainState) + # maxMemory: the maximum memory used by the domain + # memory: the current amount of memory used by the domain + # nbVirtCPU: the number of virtual CPU + # puTime: the time used by the domain in nanoseconds + + (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info() + name = domain.name() + + return driver.InstanceInfo(name, state) + + def list_instances_detail(self): + infos = [] + for domain_id in self._conn.listDomainsID(): + domain = self._conn.lookupByID(domain_id) 
+ info = self._map_to_instance_info(domain) + infos.append(info) + return infos + + def destroy(self, instance, cleanup=True): + instance_name = instance['name'] + + try: + virt_dom = self._lookup_by_name(instance_name) + except exception.NotFound: + virt_dom = None + + # If the instance is already terminated, we're still happy + # Otherwise, destroy it + if virt_dom is not None: + try: + virt_dom.destroy() + except libvirt.libvirtError as e: + is_okay = False + errcode = e.get_error_code() + if errcode == libvirt.VIR_ERR_OPERATION_INVALID: + # If the instance if already shut off, we get this: + # Code=55 Error=Requested operation is not valid: + # domain is not running + (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() + if state == power_state.SHUTOFF: + is_okay = True + + if not is_okay: + LOG.warning(_("Error from libvirt during destroy of " + "%(instance_name)s. Code=%(errcode)s " + "Error=%(e)s") % + locals()) + raise + + try: + # NOTE(justinsb): We remove the domain definition. We probably + # would do better to keep it if cleanup=False (e.g. volumes?) + # (e.g. #2 - not losing machines on failure) + virt_dom.undefine() + except libvirt.libvirtError as e: + errcode = e.get_error_code() + LOG.warning(_("Error from libvirt during undefine of " + "%(instance_name)s. Code=%(errcode)s " + "Error=%(e)s") % + locals()) + raise + + def _wait_for_destroy(): + """Called at an interval until the VM is gone.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("Instance %s destroyed successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_destroy) + timer.start(interval=0.5, now=True) + + self.firewall_driver.unfilter_instance(instance) + + if cleanup: + self._cleanup(instance) + + return True + + def _cleanup(self, instance): + target = os.path.join(FLAGS.instances_path, instance['name']) + instance_name = instance['name'] + LOG.info(_('instance %(instance_name)s: deleting instance files' + ' %(target)s') % locals()) + if FLAGS.libvirt_type == 'lxc': + disk.destroy_container(target, instance, nbd=FLAGS.use_cow_images) + if os.path.exists(target): + shutil.rmtree(target) + + @exception.wrap_exception + def attach_volume(self, instance_name, device_path, mountpoint): + virt_dom = self._lookup_by_name(instance_name) + mount_device = mountpoint.rpartition("/")[2] + if device_path.startswith('/dev/'): + xml = """ + + + + """ % (device_path, mount_device) + elif ':' in device_path: + (protocol, name) = device_path.split(':') + xml = """ + + + + """ % (protocol, + name, + mount_device) + else: + raise exception.Invalid(_("Invalid device path %s") % device_path) + + virt_dom.attachDevice(xml) + + def _get_disk_xml(self, xml, device): + """Returns the xml for the disk mounted at device""" + try: + doc = libxml2.parseDoc(xml) + except: + return None + ctx = doc.xpathNewContext() + try: + ret = ctx.xpathEval('/domain/devices/disk') + for node in ret: + for child in node.children: + if child.name == 'target': + if child.prop('dev') == device: + return str(node) + finally: + if ctx is not None: + ctx.xpathFreeContext() + if doc is not None: + doc.freeDoc() + + @exception.wrap_exception + def detach_volume(self, instance_name, mountpoint): + virt_dom = self._lookup_by_name(instance_name) + mount_device = mountpoint.rpartition("/")[2] + xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) + if not xml: + raise exception.NotFound(_("No disk at %s") % 
mount_device) + virt_dom.detachDevice(xml) + + @exception.wrap_exception + def snapshot(self, instance, image_id): + """Create snapshot from a running VM instance. + + This command only works with qemu 0.14+, the qemu_img flag is + provided so that a locally compiled binary of qemu-img can be used + to support this command. + + """ + image_service = utils.import_object(FLAGS.image_service) + virt_dom = self._lookup_by_name(instance['name']) + elevated = context.get_admin_context() + + base = image_service.show(elevated, instance['image_id']) + + metadata = {'disk_format': base['disk_format'], + 'container_format': base['container_format'], + 'is_public': False, + 'name': '%s.%s' % (base['name'], image_id), + 'properties': {'architecture': base['architecture'], + 'kernel_id': instance['kernel_id'], + 'image_location': 'snapshot', + 'image_state': 'available', + 'owner_id': instance['project_id'], + 'ramdisk_id': instance['ramdisk_id'], + } + } + + # Make the snapshot + snapshot_name = uuid.uuid4().hex + snapshot_xml = """ + + %s + + """ % snapshot_name + snapshot_ptr = virt_dom.snapshotCreateXML(snapshot_xml, 0) + + # Find the disk + xml_desc = virt_dom.XMLDesc(0) + domain = ElementTree.fromstring(xml_desc) + source = domain.find('devices/disk/source') + disk_path = source.get('file') + + # Export the snapshot to a raw image + temp_dir = tempfile.mkdtemp() + out_path = os.path.join(temp_dir, snapshot_name) + qemu_img_cmd = (FLAGS.qemu_img, + 'convert', + '-f', + 'qcow2', + '-O', + 'raw', + '-s', + snapshot_name, + disk_path, + out_path) + utils.execute(*qemu_img_cmd) + + # Upload that image to the image service + with open(out_path) as image_file: + image_service.update(elevated, + image_id, + metadata, + image_file) + + # Clean up + shutil.rmtree(temp_dir) + + @exception.wrap_exception + def reboot(self, instance): + """Reboot a virtual machine, given an instance reference. + + This method actually destroys and re-creates the domain to ensure the + reboot happens, as the guest OS cannot ignore this action. + + """ + self.destroy(instance, False) + xml = self.to_xml(instance) + self.firewall_driver.setup_basic_filtering(instance) + self.firewall_driver.prepare_instance_filter(instance) + self._create_new_domain(xml) + self.firewall_driver.apply_instance_filter(instance) + + def _wait_for_reboot(): + """Called at an interval until the VM is running again.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s rebooted successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_reboot) + return timer.start(interval=0.5, now=True) + + @exception.wrap_exception + def pause(self, instance, callback): + raise exception.ApiError("pause not supported for libvirt.") + + @exception.wrap_exception + def unpause(self, instance, callback): + raise exception.ApiError("unpause not supported for libvirt.") + + @exception.wrap_exception + def suspend(self, instance, callback): + raise exception.ApiError("suspend not supported for libvirt") + + @exception.wrap_exception + def resume(self, instance, callback): + raise exception.ApiError("resume not supported for libvirt") + + @exception.wrap_exception + def rescue(self, instance): + """Loads a VM using rescue images. 
+ + A rescue is normally performed when something goes wrong with the + primary images and data needs to be corrected/recovered. Rescuing + should not edit or over-ride the original image, only allow for + data recovery. + + """ + self.destroy(instance, False) + + xml = self.to_xml(instance, rescue=True) + rescue_images = {'image_id': FLAGS.rescue_image_id, + 'kernel_id': FLAGS.rescue_kernel_id, + 'ramdisk_id': FLAGS.rescue_ramdisk_id} + self._create_image(instance, xml, '.rescue', rescue_images) + self._create_new_domain(xml) + + def _wait_for_rescue(): + """Called at an interval until the VM is running again.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s rescued successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_rescue) + return timer.start(interval=0.5, now=True) + + @exception.wrap_exception + def unrescue(self, instance): + """Reboot the VM which is being rescued back into primary images. + + Because reboot destroys and re-creates instances, unresue should + simply call reboot. + + """ + self.reboot(instance) + + @exception.wrap_exception + def poll_rescued_instances(self, timeout): + pass + + # NOTE(ilyaalekseyev): Implementation like in multinics + # for xenapi(tr3buchet) + @exception.wrap_exception + def spawn(self, instance, network_info=None): + xml = self.to_xml(instance, False, network_info) + self.firewall_driver.setup_basic_filtering(instance, network_info) + self.firewall_driver.prepare_instance_filter(instance, network_info) + self._create_image(instance, xml, network_info) + domain = self._create_new_domain(xml) + LOG.debug(_("instance %s: is running"), instance['name']) + self.firewall_driver.apply_instance_filter(instance) + + if FLAGS.start_guests_on_host_boot: + LOG.debug(_("instance %s: setting autostart ON") % + instance['name']) + domain.setAutostart(1) + + def _wait_for_boot(): + """Called at an interval until the VM is running.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s spawned successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_boot) + return timer.start(interval=0.5, now=True) + + def _flush_xen_console(self, virsh_output): + LOG.info(_('virsh said: %r'), virsh_output) + virsh_output = virsh_output[0].strip() + + if virsh_output.startswith('/dev/'): + LOG.info(_("cool, it's a device")) + out, err = utils.execute('sudo', 'dd', + "if=%s" % virsh_output, + 'iflag=nonblock', + check_exit_code=False) + return out + else: + return '' + + def _append_to_file(self, data, fpath): + LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals()) + fp = open(fpath, 'a+') + fp.write(data) + return fpath + + def _dump_file(self, fpath): + fp = open(fpath, 'r+') + contents = fp.read() + LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals()) + return contents + + @exception.wrap_exception + def get_console_output(self, instance): + console_log = os.path.join(FLAGS.instances_path, instance['name'], + 'console.log') + + utils.execute('sudo', 'chown', 
os.getuid(), console_log) + + if FLAGS.libvirt_type == 'xen': + # Xen is special + virsh_output = utils.execute('virsh', 'ttyconsole', + instance['name']) + data = self._flush_xen_console(virsh_output) + fpath = self._append_to_file(data, console_log) + elif FLAGS.libvirt_type == 'lxc': + # LXC is also special + LOG.info(_("Unable to read LXC console")) + else: + fpath = console_log + + return self._dump_file(fpath) + + @exception.wrap_exception + def get_ajax_console(self, instance): + def get_open_port(): + start_port, end_port = FLAGS.ajaxterm_portrange.split("-") + for i in xrange(0, 100): # don't loop forever + port = random.randint(int(start_port), int(end_port)) + # netcat will exit with 0 only if the port is in use, + # so a nonzero return value implies it is unused + cmd = 'netcat', '0.0.0.0', port, '-w', '1' + try: + stdout, stderr = utils.execute(*cmd, process_input='') + except exception.ProcessExecutionError: + return port + raise Exception(_('Unable to find an open port')) + + def get_pty_for_instance(instance_name): + virt_dom = self._lookup_by_name(instance_name) + xml = virt_dom.XMLDesc(0) + dom = minidom.parseString(xml) + + for serial in dom.getElementsByTagName('serial'): + if serial.getAttribute('type') == 'pty': + source = serial.getElementsByTagName('source')[0] + return source.getAttribute('path') + + port = get_open_port() + token = str(uuid.uuid4()) + host = instance['host'] + + ajaxterm_cmd = 'sudo socat - %s' \ + % get_pty_for_instance(instance['name']) + + cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \ + % (utils.novadir(), ajaxterm_cmd, token, port) + + subprocess.Popen(cmd, shell=True) + return {'token': token, 'host': host, 'port': port} + + @exception.wrap_exception + def get_vnc_console(self, instance): + def get_vnc_port_for_instance(instance_name): + virt_dom = self._lookup_by_name(instance_name) + xml = virt_dom.XMLDesc(0) + # TODO: use etree instead of minidom + dom = minidom.parseString(xml) + + for graphic in dom.getElementsByTagName('graphics'): + if graphic.getAttribute('type') == 'vnc': + return graphic.getAttribute('port') + + port = get_vnc_port_for_instance(instance['name']) + token = str(uuid.uuid4()) + host = instance['host'] + + return {'token': token, 'host': host, 'port': port} + + @staticmethod + def _cache_image(fn, target, fname, cow=False, *args, **kwargs): + """Wrapper for a method that creates an image that caches the image. + + This wrapper will save the image into a common store and create a + copy for use by the hypervisor. + + The underlying method should specify a kwarg of target representing + where the image will be saved. + + fname is used as the filename of the base image. The filename needs + to be unique to a given image. + + If cow is True, it will make a CoW image instead of a copy. 
+ """ + if not os.path.exists(target): + base_dir = os.path.join(FLAGS.instances_path, '_base') + if not os.path.exists(base_dir): + os.mkdir(base_dir) + base = os.path.join(base_dir, fname) + + @utils.synchronized(fname) + def call_if_not_exists(base, fn, *args, **kwargs): + if not os.path.exists(base): + fn(target=base, *args, **kwargs) + + call_if_not_exists(base, fn, *args, **kwargs) + + if cow: + utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o', + 'cluster_size=2M,backing_file=%s' % base, + target) + else: + utils.execute('cp', base, target) + + def _fetch_image(self, target, image_id, user, project, size=None): + """Grab image and optionally attempt to resize it""" + images.fetch(image_id, target, user, project) + if size: + disk.extend(target, size) + + def _create_local(self, target, local_gb): + """Create a blank image of specified size""" + utils.execute('truncate', target, '-s', "%dG" % local_gb) + # TODO(vish): should we format disk by default? + + def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, + network_info=None): + if not network_info: + network_info = netutils.get_network_info(inst) + + if not suffix: + suffix = '' + + # syntactic nicety + def basepath(fname='', suffix=suffix): + return os.path.join(FLAGS.instances_path, + inst['name'], + fname + suffix) + + # ensure directories exist and are writable + utils.execute('mkdir', '-p', basepath(suffix='')) + + LOG.info(_('instance %s: Creating image'), inst['name']) + f = open(basepath('libvirt.xml'), 'w') + f.write(libvirt_xml) + f.close() + + if FLAGS.libvirt_type == 'lxc': + container_dir = '%s/rootfs' % basepath(suffix='') + utils.execute('mkdir', '-p', container_dir) + + # NOTE(vish): No need add the suffix to console.log + os.close(os.open(basepath('console.log', ''), + os.O_CREAT | os.O_WRONLY, 0660)) + + user = manager.AuthManager().get_user(inst['user_id']) + project = manager.AuthManager().get_project(inst['project_id']) + + if not disk_images: + disk_images = {'image_id': inst['image_id'], + 'kernel_id': inst['kernel_id'], + 'ramdisk_id': inst['ramdisk_id']} + + if disk_images['kernel_id']: + fname = '%08x' % int(disk_images['kernel_id']) + self._cache_image(fn=self._fetch_image, + target=basepath('kernel'), + fname=fname, + image_id=disk_images['kernel_id'], + user=user, + project=project) + if disk_images['ramdisk_id']: + fname = '%08x' % int(disk_images['ramdisk_id']) + self._cache_image(fn=self._fetch_image, + target=basepath('ramdisk'), + fname=fname, + image_id=disk_images['ramdisk_id'], + user=user, + project=project) + + root_fname = '%08x' % int(disk_images['image_id']) + size = FLAGS.minimum_root_size + + inst_type_id = inst['instance_type_id'] + inst_type = instance_types.get_instance_type(inst_type_id) + if inst_type['name'] == 'm1.tiny' or suffix == '.rescue': + size = None + root_fname += "_sm" + + self._cache_image(fn=self._fetch_image, + target=basepath('disk'), + fname=root_fname, + cow=FLAGS.use_cow_images, + image_id=disk_images['image_id'], + user=user, + project=project, + size=size) + + if inst_type['local_gb']: + self._cache_image(fn=self._create_local, + target=basepath('disk.local'), + fname="local_%s" % inst_type['local_gb'], + cow=FLAGS.use_cow_images, + local_gb=inst_type['local_gb']) + + # For now, we assume that if we're not using a kernel, we're using a + # partitioned disk image where the target partition is the first + # partition + target_partition = None + if not inst['kernel_id']: + target_partition = "1" + + if FLAGS.libvirt_type == 'lxc': + 
target_partition = None + + if inst['key_data']: + key = str(inst['key_data']) + else: + key = None + net = None + + nets = [] + ifc_template = open(FLAGS.injected_network_template).read() + ifc_num = -1 + have_injected_networks = False + admin_context = context.get_admin_context() + for (network_ref, mapping) in network_info: + ifc_num += 1 + + if not network_ref['injected']: + continue + + have_injected_networks = True + address = mapping['ips'][0]['ip'] + address_v6 = None + if FLAGS.use_ipv6: + address_v6 = mapping['ip6s'][0]['ip'] + net_info = {'name': 'eth%d' % ifc_num, + 'address': address, + 'netmask': network_ref['netmask'], + 'gateway': network_ref['gateway'], + 'broadcast': network_ref['broadcast'], + 'dns': network_ref['dns'], + 'address_v6': address_v6, + 'gateway_v6': network_ref['gateway_v6'], + 'netmask_v6': network_ref['netmask_v6']} + nets.append(net_info) + + if have_injected_networks: + net = str(Template(ifc_template, + searchList=[{'interfaces': nets, + 'use_ipv6': FLAGS.use_ipv6}])) + + if key or net: + inst_name = inst['name'] + img_id = inst.image_id + if key: + LOG.info(_('instance %(inst_name)s: injecting key into' + ' image %(img_id)s') % locals()) + if net: + LOG.info(_('instance %(inst_name)s: injecting net into' + ' image %(img_id)s') % locals()) + try: + disk.inject_data(basepath('disk'), key, net, + partition=target_partition, + nbd=FLAGS.use_cow_images) + + if FLAGS.libvirt_type == 'lxc': + disk.setup_container(basepath('disk'), + container_dir=container_dir, + nbd=FLAGS.use_cow_images) + except Exception as e: + # This could be a windows image, or a vmdk format disk + LOG.warn(_('instance %(inst_name)s: ignoring error injecting' + ' data into image %(img_id)s (%(e)s)') % locals()) + + if FLAGS.libvirt_type == 'uml': + utils.execute('sudo', 'chown', 'root', basepath('disk')) + + def _get_nic_for_xml(self, network, mapping): + # Assume that the gateway also acts as the dhcp server. + dhcp_server = network['gateway'] + gateway_v6 = network['gateway_v6'] + mac_id = mapping['mac'].replace(':', '') + + if FLAGS.allow_project_net_traffic: + if FLAGS.use_ipv6: + net, mask = netutils.get_net_and_mask(network['cidr']) + net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( + network['cidr_v6']) + extra_params = ("\n" + "\n" + "\n" + "\n") % \ + (net, mask, net_v6, prefixlen_v6) + else: + net, mask = netutils.get_net_and_mask(network['cidr']) + extra_params = ("\n" + "\n") % \ + (net, mask) + else: + extra_params = "\n" + + result = { + 'id': mac_id, + 'bridge_name': network['bridge'], + 'mac_address': mapping['mac'], + 'ip_address': mapping['ips'][0]['ip'], + 'dhcp_server': dhcp_server, + 'extra_params': extra_params, + } + + if gateway_v6: + result['gateway_v6'] = gateway_v6 + "/128" + + return result + + def to_xml(self, instance, rescue=False, network_info=None): + # TODO(termie): cache? 
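to_xml gathers everything the Cheetah template needs into one flat dict and renders it through a searchList; a self-contained sketch of that mechanism with a toy template (the real template is libvirt.xml.template, and all values below are made up):

from Cheetah.Template import Template

source = ("<domain type='$type'><name>$name</name>"
          "<memory>$memory_kb</memory><vcpu>$vcpus</vcpu></domain>")
xml_info = {'type': 'kvm',
            'name': 'instance-00000001',
            'memory_kb': 2048 * 1024,
            'vcpus': 1}
print str(Template(source, searchList=[xml_info]))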
+ LOG.debug(_('instance %s: starting toXML method'), instance['name']) + + # TODO(adiantum) remove network_info creation code + # when multinics will be completed + if not network_info: + network_info = netutils.get_network_info(instance) + + nics = [] + for (network, mapping) in network_info: + nics.append(self._get_nic_for_xml(network, + mapping)) + # FIXME(vish): stick this in db + inst_type_id = instance['instance_type_id'] + inst_type = instance_types.get_instance_type(inst_type_id) + + if FLAGS.use_cow_images: + driver_type = 'qcow2' + else: + driver_type = 'raw' + + xml_info = {'type': FLAGS.libvirt_type, + 'name': instance['name'], + 'basepath': os.path.join(FLAGS.instances_path, + instance['name']), + 'memory_kb': inst_type['memory_mb'] * 1024, + 'vcpus': inst_type['vcpus'], + 'rescue': rescue, + 'local': inst_type['local_gb'], + 'driver_type': driver_type, + 'nics': nics} + + if FLAGS.vnc_enabled: + if FLAGS.libvirt_type != 'lxc': + xml_info['vncserver_host'] = FLAGS.vncserver_host + if not rescue: + if instance['kernel_id']: + xml_info['kernel'] = xml_info['basepath'] + "/kernel" + + if instance['ramdisk_id']: + xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" + + xml_info['disk'] = xml_info['basepath'] + "/disk" + + xml = str(Template(self.libvirt_xml, searchList=[xml_info])) + LOG.debug(_('instance %s: finished toXML method'), + instance['name']) + return xml + + def _lookup_by_name(self, instance_name): + """Retrieve libvirt domain object given an instance name. + + All libvirt error handling should be handled in this method and + relevant nova exceptions should be raised in response. + + """ + try: + return self._conn.lookupByName(instance_name) + except libvirt.libvirtError as ex: + error_code = ex.get_error_code() + if error_code == libvirt.VIR_ERR_NO_DOMAIN: + msg = _("Instance %s not found") % instance_name + raise exception.NotFound(msg) + + msg = _("Error from libvirt while looking up %(instance_name)s: " + "[Error Code %(error_code)s] %(ex)s") % locals() + raise exception.Error(msg) + + def get_info(self, instance_name): + """Retrieve information from libvirt for a specific instance name. + + If a libvirt error is encountered during lookup, we might raise a + NotFound exception or Error exception depending on how severe the + libvirt error is. + + """ + virt_dom = self._lookup_by_name(instance_name) + (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() + return {'state': state, + 'max_mem': max_mem, + 'mem': mem, + 'num_cpu': num_cpu, + 'cpu_time': cpu_time} + + def _create_new_domain(self, xml, persistent=True, launch_flags=0): + # NOTE(justinsb): libvirt has two types of domain: + # * a transient domain disappears when the guest is shutdown + # or the host is rebooted. + # * a permanent domain is not automatically deleted + # NOTE(justinsb): Even for ephemeral instances, transient seems risky + + if persistent: + # To create a persistent domain, first define it, then launch it. + domain = self._conn.defineXML(xml) + + domain.createWithFlags(launch_flags) + else: + # createXML call creates a transient domain + domain = self._conn.createXML(xml, launch_flags) + + return domain + + def get_diagnostics(self, instance_name): + raise exception.ApiError(_("diagnostics are not supported " + "for libvirt")) + + def get_disks(self, instance_name): + """ + Note that this function takes an instance name, not an Instance, so + that it can be called by monitor. + + Returns a list of all block devices for this domain. 
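The device lookups below use libxml2 and xpath; as the inline TODO suggests, a rough xml.etree equivalent for pulling the disk target devices out of the domain XML could look like this (the sample XML is illustrative):

from xml.etree import ElementTree

SAMPLE_XML = """
<domain type='kvm'>
  <devices>
    <disk type='file'><target dev='vda' bus='virtio'/></disk>
    <disk type='file'><target dev='vdb' bus='virtio'/></disk>
  </devices>
</domain>
"""

def get_disk_devices(domain_xml):
    doc = ElementTree.fromstring(domain_xml)
    return [target.get('dev')
            for target in doc.findall('devices/disk/target')
            if target.get('dev') is not None]

print get_disk_devices(SAMPLE_XML)   # ['vda', 'vdb']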
+ """ + domain = self._lookup_by_name(instance_name) + # TODO(devcamcar): Replace libxml2 with etree. + xml = domain.XMLDesc(0) + doc = None + + try: + doc = libxml2.parseDoc(xml) + except: + return [] + + ctx = doc.xpathNewContext() + disks = [] + + try: + ret = ctx.xpathEval('/domain/devices/disk') + + for node in ret: + devdst = None + + for child in node.children: + if child.name == 'target': + devdst = child.prop('dev') + + if devdst is None: + continue + + disks.append(devdst) + finally: + if ctx is not None: + ctx.xpathFreeContext() + if doc is not None: + doc.freeDoc() + + return disks + + def get_interfaces(self, instance_name): + """ + Note that this function takes an instance name, not an Instance, so + that it can be called by monitor. + + Returns a list of all network interfaces for this instance. + """ + domain = self._lookup_by_name(instance_name) + # TODO(devcamcar): Replace libxml2 with etree. + xml = domain.XMLDesc(0) + doc = None + + try: + doc = libxml2.parseDoc(xml) + except: + return [] + + ctx = doc.xpathNewContext() + interfaces = [] + + try: + ret = ctx.xpathEval('/domain/devices/interface') + + for node in ret: + devdst = None + + for child in node.children: + if child.name == 'target': + devdst = child.prop('dev') + + if devdst is None: + continue + + interfaces.append(devdst) + finally: + if ctx is not None: + ctx.xpathFreeContext() + if doc is not None: + doc.freeDoc() + + return interfaces + + def get_vcpu_total(self): + """Get vcpu number of physical computer. + + :returns: the number of cpu core. + + """ + + # On certain platforms, this will raise a NotImplementedError. + try: + return multiprocessing.cpu_count() + except NotImplementedError: + LOG.warn(_("Cannot get the number of cpu, because this " + "function is not implemented for this platform. " + "This error can be safely ignored for now.")) + return 0 + + def get_memory_mb_total(self): + """Get the total memory size(MB) of physical computer. + + :returns: the total amount of memory(MB). + + """ + + if sys.platform.upper() != 'LINUX2': + return 0 + + meminfo = open('/proc/meminfo').read().split() + idx = meminfo.index('MemTotal:') + # transforming kb to mb. + return int(meminfo[idx + 1]) / 1024 + + def get_local_gb_total(self): + """Get the total hdd size(GB) of physical computer. + + :returns: + The total amount of HDD(GB). + Note that this value shows a partition where + NOVA-INST-DIR/instances mounts. + + """ + + hddinfo = os.statvfs(FLAGS.instances_path) + return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024 + + def get_vcpu_used(self): + """ Get vcpu usage number of physical computer. + + :returns: The total number of vcpu that currently used. + + """ + + total = 0 + for dom_id in self._conn.listDomainsID(): + dom = self._conn.lookupByID(dom_id) + total += len(dom.vcpus()[1]) + return total + + def get_memory_mb_used(self): + """Get the free memory size(MB) of physical computer. + + :returns: the total usage of memory(MB). + + """ + + if sys.platform.upper() != 'LINUX2': + return 0 + + m = open('/proc/meminfo').read().split() + idx1 = m.index('MemFree:') + idx2 = m.index('Buffers:') + idx3 = m.index('Cached:') + avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024 + return self.get_memory_mb_total() - avail + + def get_local_gb_used(self): + """Get the free hdd size(GB) of physical computer. + + :returns: + The total usage of HDD(GB). + Note that this value shows a partition where + NOVA-INST-DIR/instances mounts. 
+ + """ + + hddinfo = os.statvfs(FLAGS.instances_path) + avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024 + return self.get_local_gb_total() - avail + + def get_hypervisor_type(self): + """Get hypervisor type. + + :returns: hypervisor type (ex. qemu) + + """ + + return self._conn.getType() + + def get_hypervisor_version(self): + """Get hypervisor version. + + :returns: hypervisor version (ex. 12003) + + """ + + # NOTE(justinsb): getVersion moved between libvirt versions + # Trying to do be compatible with older versions is a lost cause + # But ... we can at least give the user a nice message + method = getattr(self._conn, 'getVersion', None) + if method is None: + raise exception.Error(_("libvirt version is too old" + " (does not support getVersion)")) + # NOTE(justinsb): If we wanted to get the version, we could: + # method = getattr(libvirt, 'getVersion', None) + # NOTE(justinsb): This would then rely on a proper version check + + return method() + + def get_cpu_info(self): + """Get cpuinfo information. + + Obtains cpu feature from virConnect.getCapabilities, + and returns as a json string. + + :return: see above description + + """ + + xml = self._conn.getCapabilities() + xml = libxml2.parseDoc(xml) + nodes = xml.xpathEval('//host/cpu') + if len(nodes) != 1: + raise exception.Invalid(_("Invalid xml. '' must be 1," + "but %d\n") % len(nodes) + + xml.serialize()) + + cpu_info = dict() + + arch_nodes = xml.xpathEval('//host/cpu/arch') + if arch_nodes: + cpu_info['arch'] = arch_nodes[0].getContent() + + model_nodes = xml.xpathEval('//host/cpu/model') + if model_nodes: + cpu_info['model'] = model_nodes[0].getContent() + + vendor_nodes = xml.xpathEval('//host/cpu/vendor') + if vendor_nodes: + cpu_info['vendor'] = vendor_nodes[0].getContent() + + topology_nodes = xml.xpathEval('//host/cpu/topology') + topology = dict() + if topology_nodes: + topology_node = topology_nodes[0].get_properties() + while topology_node: + name = topology_node.get_name() + topology[name] = topology_node.getContent() + topology_node = topology_node.get_next() + + keys = ['cores', 'sockets', 'threads'] + tkeys = topology.keys() + if set(tkeys) != set(keys): + ks = ', '.join(keys) + raise exception.Invalid(_("Invalid xml: topology" + "(%(topology)s) must have " + "%(ks)s") % locals()) + + feature_nodes = xml.xpathEval('//host/cpu/feature') + features = list() + for nodes in feature_nodes: + features.append(nodes.get_properties().getContent()) + + cpu_info['topology'] = topology + cpu_info['features'] = features + return utils.dumps(cpu_info) + + def block_stats(self, instance_name, disk): + """ + Note that this function takes an instance name, not an Instance, so + that it can be called by monitor. + """ + domain = self._lookup_by_name(instance_name) + return domain.blockStats(disk) + + def interface_stats(self, instance_name, interface): + """ + Note that this function takes an instance name, not an Instance, so + that it can be called by monitor. + """ + domain = self._lookup_by_name(instance_name) + return domain.interfaceStats(interface) + + def get_console_pool_info(self, console_type): + #TODO(mdragon): console proxy should be implemented for libvirt, + # in case someone wants to use it with kvm or + # such. For now return fake data. 
+ return {'address': '127.0.0.1', + 'username': 'fakeuser', + 'password': 'fakepassword'} + + def refresh_security_group_rules(self, security_group_id): + self.firewall_driver.refresh_security_group_rules(security_group_id) + + def refresh_security_group_members(self, security_group_id): + self.firewall_driver.refresh_security_group_members(security_group_id) + + def update_available_resource(self, ctxt, host): + """Updates compute manager resource info on ComputeNode table. + + This method is called when nova-coompute launches, and + whenever admin executes "nova-manage service update_resource". + + :param ctxt: security context + :param host: hostname that compute manager is currently running + + """ + + try: + service_ref = db.service_get_all_compute_by_host(ctxt, host)[0] + except exception.NotFound: + raise exception.Invalid(_("Cannot update compute manager " + "specific info, because no service " + "record was found.")) + + # Updating host information + dic = {'vcpus': self.get_vcpu_total(), + 'memory_mb': self.get_memory_mb_total(), + 'local_gb': self.get_local_gb_total(), + 'vcpus_used': self.get_vcpu_used(), + 'memory_mb_used': self.get_memory_mb_used(), + 'local_gb_used': self.get_local_gb_used(), + 'hypervisor_type': self.get_hypervisor_type(), + 'hypervisor_version': self.get_hypervisor_version(), + 'cpu_info': self.get_cpu_info()} + + compute_node_ref = service_ref['compute_node'] + if not compute_node_ref: + LOG.info(_('Compute_service record created for %s ') % host) + dic['service_id'] = service_ref['id'] + db.compute_node_create(ctxt, dic) + else: + LOG.info(_('Compute_service record updated for %s ') % host) + db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic) + + def compare_cpu(self, cpu_info): + """Checks the host cpu is compatible to a cpu given by xml. + + "xml" must be a part of libvirt.openReadonly().getCapabilities(). + return values follows by virCPUCompareResult. + if 0 > return value, do live migration. + 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' + + :param cpu_info: json string that shows cpu feature(see get_cpu_info()) + :returns: + None. if given cpu info is not compatible to this server, + raise exception. + + """ + + LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info) + dic = utils.loads(cpu_info) + xml = str(Template(self.cpuinfo_xml, searchList=dic)) + LOG.info(_('to xml...\n:%s ' % xml)) + + u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult" + m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s") + # unknown character exists in xml, then libvirt complains + try: + ret = self._conn.compareCPU(xml, 0) + except libvirt.libvirtError, e: + ret = e.message + LOG.error(m % locals()) + raise + + if ret <= 0: + raise exception.Invalid(m % locals()) + + return + + def ensure_filtering_rules_for_instance(self, instance_ref, + time=None): + """Setting up filtering rules and waiting for its completion. + + To migrate an instance, filtering rules to hypervisors + and firewalls are inevitable on destination host. + ( Waiting only for filterling rules to hypervisor, + since filtering rules to firewall rules can be set faster). + + Concretely, the below method must be called. + - setup_basic_filtering (for nova-basic, etc.) + - prepare_instance_filter(for nova-instance-instance-xxx, etc.) + + to_xml may have to be called since it defines PROJNET, PROJMASK. + but libvirt migrates those value through migrateToURI(), + so , no need to be called. 
+ + Don't use thread for this method since migration should + not be started when setting-up filtering rules operations + are not completed. + + :params instance_ref: nova.db.sqlalchemy.models.Instance object + + """ + + if not time: + time = greenthread + + # If any instances never launch at destination host, + # basic-filtering must be set here. + self.firewall_driver.setup_basic_filtering(instance_ref) + # setting up n)ova-instance-instance-xx mainly. + self.firewall_driver.prepare_instance_filter(instance_ref) + + # wait for completion + timeout_count = range(FLAGS.live_migration_retry_count) + while timeout_count: + if self.firewall_driver.instance_filter_exists(instance_ref): + break + timeout_count.pop() + if len(timeout_count) == 0: + msg = _('Timeout migrating for %s. nwfilter not found.') + raise exception.Error(msg % instance_ref.name) + time.sleep(1) + + def live_migration(self, ctxt, instance_ref, dest, + post_method, recover_method): + """Spawning live_migration operation for distributing high-load. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :params dest: destination host + :params post_method: + post operation method. + expected nova.compute.manager.post_live_migration. + :params recover_method: + recovery method when any exception occurs. + expected nova.compute.manager.recover_live_migration. + + """ + + greenthread.spawn(self._live_migration, ctxt, instance_ref, dest, + post_method, recover_method) + + def _live_migration(self, ctxt, instance_ref, dest, + post_method, recover_method): + """Do live migration. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :params dest: destination host + :params post_method: + post operation method. + expected nova.compute.manager.post_live_migration. + :params recover_method: + recovery method when any exception occurs. + expected nova.compute.manager.recover_live_migration. + + """ + + # Do live migration. + try: + flaglist = FLAGS.live_migration_flag.split(',') + flagvals = [getattr(libvirt, x.strip()) for x in flaglist] + logical_sum = reduce(lambda x, y: x | y, flagvals) + + if self.read_only: + tmpconn = self._connect(self.libvirt_uri, False) + dom = tmpconn.lookupByName(instance_ref.name) + dom.migrateToURI(FLAGS.live_migration_uri % dest, + logical_sum, + None, + FLAGS.live_migration_bandwidth) + tmpconn.close() + else: + dom = self._conn.lookupByName(instance_ref.name) + dom.migrateToURI(FLAGS.live_migration_uri % dest, + logical_sum, + None, + FLAGS.live_migration_bandwidth) + + except Exception: + recover_method(ctxt, instance_ref, dest=dest) + raise + + # Waiting for completion of live_migration. 
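The flag handling above turns the comma-separated live_migration_flag setting into a single bitmask via getattr and a bitwise OR; a standalone sketch with stand-in constants (the real values come from the libvirt module and may differ):

class libvirt(object):                     # stand-in for the real module
    VIR_MIGRATE_PEER2PEER = 2
    VIR_MIGRATE_UNDEFINE_SOURCE = 16

flag_string = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER'
flagvals = [getattr(libvirt, name.strip()) for name in flag_string.split(',')]
logical_sum = reduce(lambda x, y: x | y, flagvals)
print logical_sum                          # 18 with the stand-in values above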
+ timer = utils.LoopingCall(f=None) + + def wait_for_live_migration(): + """waiting for live migration completion""" + try: + self.get_info(instance_ref.name)['state'] + except exception.NotFound: + timer.stop() + post_method(ctxt, instance_ref, dest) + + timer.f = wait_for_live_migration + timer.start(interval=0.5, now=True) + + def unfilter_instance(self, instance_ref): + """See comments of same method in firewall_driver.""" + self.firewall_driver.unfilter_instance(instance_ref) diff --git a/nova/virt/libvirt/cpuinfo.xml.template b/nova/virt/libvirt/cpuinfo.xml.template new file mode 100644 index 000000000..48842b29d --- /dev/null +++ b/nova/virt/libvirt/cpuinfo.xml.template @@ -0,0 +1,9 @@ + + $arch + $model + $vendor + +#for $var in $features + +#end for + diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py new file mode 100644 index 000000000..99ba02aaa --- /dev/null +++ b/nova/virt/libvirt/firewall.py @@ -0,0 +1,630 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from eventlet import tpool + +from nova import context +from nova import db +from nova import flags +from nova import log as logging +from nova import utils +from nova.virt.libvirt import netutils + + +LOG = logging.getLogger("nova.virt.libvirt.firewall") +FLAGS = flags.FLAGS + + +try: + import libvirt +except ImportError: + LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will " + "not work correctly.")) + + +class FirewallDriver(object): + + def prepare_instance_filter(self, instance, network_info=None): + """Prepare filters for the instance. + + At this point, the instance isn't running yet. + + """ + raise NotImplementedError() + + def unfilter_instance(self, instance): + """Stop filtering instance.""" + raise NotImplementedError() + + def apply_instance_filter(self, instance): + """Apply instance filter. + + Once this method returns, the instance should be firewalled + appropriately. This method should as far as possible be a + no-op. It's vastly preferred to get everything set up in + prepare_instance_filter. + + """ + raise NotImplementedError() + + def refresh_security_group_rules(self, security_group_id): + """Refresh security group rules from data store. + + Gets called when a rule has been added to or removed from + the security group. + + """ + raise NotImplementedError() + + def refresh_security_group_members(self, security_group_id): + """Refresh security group members from data store. + + Gets called when an instance gets added to or removed from + the security group. + + """ + raise NotImplementedError() + + def setup_basic_filtering(self, instance, network_info=None): + """Create rules to block spoofing and allow dhcp. + + This gets called when spawning an instance, before + :method:`prepare_instance_filter`. 
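A minimal, hypothetical driver satisfying this interface might look like the sketch below; it filters nothing and exists only to illustrate the contract, and the constructor signature (accepting get_connection) mirrors how the connection object constructs its firewall driver, which is an assumption here:

class NoopFirewallDriver(FirewallDriver):
    """Hypothetical driver: implements the interface, filters nothing."""

    def __init__(self, get_connection=None, **kwargs):
        self._get_connection = get_connection

    def prepare_instance_filter(self, instance, network_info=None):
        pass

    def apply_instance_filter(self, instance):
        pass

    def unfilter_instance(self, instance):
        pass

    def refresh_security_group_rules(self, security_group_id):
        pass

    def refresh_security_group_members(self, security_group_id):
        pass

    def setup_basic_filtering(self, instance, network_info=None):
        pass

    def instance_filter_exists(self, instance):
        return True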
+ + """ + raise NotImplementedError() + + def instance_filter_exists(self, instance): + """Check nova-instance-instance-xxx exists.""" + raise NotImplementedError() + + +class NWFilterFirewall(FirewallDriver): + """Network filter firewall implementation. + + This class implements a network filtering mechanism versatile + enough for EC2 style Security Group filtering by leveraging + libvirt's nwfilter. + + First, all instances get a filter ("nova-base-filter") applied. + This filter provides some basic security such as protection against + MAC spoofing, IP spoofing, and ARP spoofing. + + This filter drops all incoming ipv4 and ipv6 connections. + Outgoing connections are never blocked. + + Second, every security group maps to a nwfilter filter(*). + NWFilters can be updated at runtime and changes are applied + immediately, so changes to security groups can be applied at + runtime (as mandated by the spec). + + Security group rules are named "nova-secgroup-" where + is the internal id of the security group. They're applied only on + hosts that have instances in the security group in question. + + Updates to security groups are done by updating the data model + (in response to API calls) followed by a request sent to all + the nodes with instances in the security group to refresh the + security group. + + Each instance has its own NWFilter, which references the above + mentioned security group NWFilters. This was done because + interfaces can only reference one filter while filters can + reference multiple other filters. This has the added benefit of + actually being able to add and remove security groups from an + instance at run time. This functionality is not exposed anywhere, + though. + + Outstanding questions: + + The name is unique, so would there be any good reason to sync + the uuid across the nodes (by assigning it from the datamodel)? + + + (*) This sentence brought to you by the redundancy department of + redundancy. + + """ + + def __init__(self, get_connection, **kwargs): + self._libvirt_get_connection = get_connection + self.static_filters_configured = False + self.handle_security_groups = False + + def apply_instance_filter(self, instance): + pass + + def _get_connection(self): + return self._libvirt_get_connection() + + _conn = property(_get_connection) + + def nova_dhcp_filter(self): + """Defines nova DHCP filter. + + The standard allow-dhcp-server filter is an one, so it uses + ebtables to allow traffic through. Without a corresponding rule in + iptables, it'll get blocked anyway. + + """ + return ''' + 891e4787-e5c0-d59b-cbd6-41bc3c6b36fc + + + + + + + ''' + + def nova_ra_filter(self): + return ''' + d707fa71-4fb5-4b27-9ab7-ba5ca19c8804 + + + + ''' + + def setup_basic_filtering(self, instance, network_info=None): + """Set up basic filtering (MAC, IP, and ARP spoofing protection).""" + logging.info('called setup_basic_filtering in nwfilter') + + if not network_info: + network_info = netutils.get_network_info(instance) + + if self.handle_security_groups: + # No point in setting up a filter set that we'll be overriding + # anyway. 
+ return + + logging.info('ensuring static filters') + self._ensure_static_filters() + + if instance['image_id'] == str(FLAGS.vpn_image_id): + base_filter = 'nova-vpn' + else: + base_filter = 'nova-base' + + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + self._define_filter(self._filter_container(instance_filter_name, + [base_filter])) + + def _ensure_static_filters(self): + if self.static_filters_configured: + return + + self._define_filter(self._filter_container('nova-base', + ['no-mac-spoofing', + 'no-ip-spoofing', + 'no-arp-spoofing', + 'allow-dhcp-server'])) + self._define_filter(self._filter_container('nova-vpn', + ['allow-dhcp-server'])) + self._define_filter(self.nova_base_ipv4_filter) + self._define_filter(self.nova_base_ipv6_filter) + self._define_filter(self.nova_dhcp_filter) + self._define_filter(self.nova_ra_filter) + if FLAGS.allow_project_net_traffic: + self._define_filter(self.nova_project_filter) + if FLAGS.use_ipv6: + self._define_filter(self.nova_project_filter_v6) + + self.static_filters_configured = True + + def _filter_container(self, name, filters): + xml = '''%s''' % ( + name, + ''.join(["" % (f,) for f in filters])) + return xml + + def nova_base_ipv4_filter(self): + retval = "" + for protocol in ['tcp', 'udp', 'icmp']: + for direction, action, priority in [('out', 'accept', 399), + ('in', 'drop', 400)]: + retval += """ + <%s /> + """ % (action, direction, + priority, protocol) + retval += '' + return retval + + def nova_base_ipv6_filter(self): + retval = "" + for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: + for direction, action, priority in [('out', 'accept', 399), + ('in', 'drop', 400)]: + retval += """ + <%s /> + """ % (action, direction, + priority, protocol) + retval += '' + return retval + + def nova_project_filter(self): + retval = "" + for protocol in ['tcp', 'udp', 'icmp']: + retval += """ + <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' /> + """ % protocol + retval += '' + return retval + + def nova_project_filter_v6(self): + retval = "" + for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: + retval += """ + <%s srcipaddr='$PROJNETV6' + srcipmask='$PROJMASKV6' /> + """ % (protocol) + retval += '' + return retval + + def _define_filter(self, xml): + if callable(xml): + xml = xml() + # execute in a native thread and block current greenthread until done + tpool.execute(self._conn.nwfilterDefineXML, xml) + + def unfilter_instance(self, instance): + # Nothing to do + pass + + def prepare_instance_filter(self, instance, network_info=None): + """Creates an NWFilter for the given instance. + + In the process, it makes sure the filters for the security groups as + well as the base filter are all in place. 
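Defining a filter goes through eventlet's tpool so the blocking libvirt call runs in a native thread instead of stalling the greenthread hub; the essence of that pattern, standalone (conn is any open libvirt connection):

from eventlet import tpool

def define_filter(conn, xml):
    # tpool.execute runs the blocking call in a worker thread and returns
    # its result to the calling greenthread.
    return tpool.execute(conn.nwfilterDefineXML, xml)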
+ + """ + if not network_info: + network_info = netutils.get_network_info(instance) + if instance['image_id'] == str(FLAGS.vpn_image_id): + base_filter = 'nova-vpn' + else: + base_filter = 'nova-base' + + ctxt = context.get_admin_context() + + instance_secgroup_filter_name = \ + '%s-secgroup' % (self._instance_filter_name(instance)) + #% (instance_filter_name,) + + instance_secgroup_filter_children = ['nova-base-ipv4', + 'nova-base-ipv6', + 'nova-allow-dhcp-server'] + + for security_group in \ + db.security_group_get_by_instance(ctxt, instance['id']): + + self.refresh_security_group_rules(security_group['id']) + + instance_secgroup_filter_children += [('nova-secgroup-%s' % + security_group['id'])] + + self._define_filter( + self._filter_container(instance_secgroup_filter_name, + instance_secgroup_filter_children)) + + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + instance_filter_children = \ + [base_filter, instance_secgroup_filter_name] + + if FLAGS.use_ipv6: + gateway_v6 = network['gateway_v6'] + + if gateway_v6: + instance_secgroup_filter_children += \ + ['nova-allow-ra-server'] + + if FLAGS.allow_project_net_traffic: + instance_filter_children += ['nova-project'] + if FLAGS.use_ipv6: + instance_filter_children += ['nova-project-v6'] + + self._define_filter( + self._filter_container(instance_filter_name, + instance_filter_children)) + + return + + def refresh_security_group_rules(self, security_group_id): + return self._define_filter( + self.security_group_to_nwfilter_xml(security_group_id)) + + def security_group_to_nwfilter_xml(self, security_group_id): + security_group = db.security_group_get(context.get_admin_context(), + security_group_id) + rule_xml = "" + v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} + for rule in security_group.rules: + rule_xml += "" + if rule.cidr: + version = netutils.get_ip_version(rule.cidr) + if(FLAGS.use_ipv6 and version == 6): + net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (v6protocol[rule.protocol], net, prefixlen) + else: + net, mask = netutils.get_net_and_mask(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (rule.protocol, net, mask) + if rule.protocol in ['tcp', 'udp']: + rule_xml += "dstportstart='%s' dstportend='%s' " % \ + (rule.from_port, rule.to_port) + elif rule.protocol == 'icmp': + LOG.info('rule.protocol: %r, rule.from_port: %r, ' + 'rule.to_port: %r', rule.protocol, + rule.from_port, rule.to_port) + if rule.from_port != -1: + rule_xml += "type='%s' " % rule.from_port + if rule.to_port != -1: + rule_xml += "code='%s' " % rule.to_port + + rule_xml += '/>\n' + rule_xml += "\n" + xml = " + ${name} + ${memory_kb} + +#if $type == 'lxc' + #set $disk_prefix = '' + #set $disk_bus = '' + exe + /sbin/init +#else if $type == 'uml' + #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + uml + /usr/bin/linux + /dev/ubda +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + linux + /dev/xvda + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + hvm + #end if + #if $getVar('rescue', False) + ${basepath}/kernel.rescue + ${basepath}/ramdisk.rescue + #else + #if $getVar('kernel', None) + ${kernel} + #if $type == 'xen' + ro + #else + root=/dev/vda console=ttyS0 + #end if + #if $getVar('ramdisk', None) + ${ramdisk} + #end if + #else + + #end if + #end if +#end if + + + + + ${vcpus} + +#if $type == 'lxc' 
+ + + + +#else + #if $getVar('rescue', False) + + + + + + + + + + + #else + + + + + + #if $getVar('local', False) + + + + + + #end if + #end if +#end if + +#for $nic in $nics + + + + + + + +#if $getVar('nic.extra_params', False) + ${nic.extra_params} +#end if +#if $getVar('nic.gateway_v6', False) + +#end if + + +#end for + + + + + + + + + + + + + + + + +#if $getVar('vncserver_host', False) + +#end if + + diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py new file mode 100644 index 000000000..3ed9a0fdc --- /dev/null +++ b/nova/virt/libvirt/netutils.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Network-releated utilities for supporting libvirt connection code.""" + + +import IPy + +from nova import context +from nova import db +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS + + +def get_net_and_mask(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.netmask()) + + +def get_net_and_prefixlen(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.prefixlen()) + + +def get_ip_version(cidr): + net = IPy.IP(cidr) + return int(net.version()) + + +def get_network_info(instance): + # TODO(adiantum) If we will keep this function + # we should cache network_info + admin_context = context.get_admin_context() + + ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, + instance['id']) + networks = db.network_get_all_by_instance(admin_context, + instance['id']) + flavor = db.instance_type_get_by_id(admin_context, + instance['instance_type_id']) + network_info = [] + + for network in networks: + network_ips = [ip for ip in ip_addresses + if ip['network_id'] == network['id']] + + def ip_dict(ip): + return { + 'ip': ip['address'], + 'netmask': network['netmask'], + 'enabled': '1'} + + def ip6_dict(): + prefix = network['cidr_v6'] + mac = instance['mac_address'] + return { + 'ip': utils.to_global_ipv6(prefix, mac), + 'netmask': network['netmask_v6'], + 'enabled': '1'} + + mapping = { + 'label': network['label'], + 'gateway': network['gateway'], + 'broadcast': network['broadcast'], + 'mac': instance['mac_address'], + 'rxtx_cap': flavor['rxtx_cap'], + 'dns': [network['dns']], + 'ips': [ip_dict(ip) for ip in network_ips]} + + if FLAGS.use_ipv6: + mapping['ip6s'] = [ip6_dict()] + mapping['gateway6'] = network['gateway_v6'] + + network_info.append((network, mapping)) + return network_info diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py deleted file mode 100644 index e76de47db..000000000 --- a/nova/virt/libvirt_conn.py +++ /dev/null @@ -1,2168 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A connection to a hypervisor through libvirt. - -Supports KVM, LXC, QEMU, UML, and XEN. - -**Related Flags** - -:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen - (default: kvm). -:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). -:libvirt_xml_template: Libvirt XML Template. -:rescue_image_id: Rescue ami image (default: ami-rescue). -:rescue_kernel_id: Rescue aki image (default: aki-rescue). -:rescue_ramdisk_id: Rescue ari image (default: ari-rescue). -:injected_network_template: Template file for injected network -:allow_project_net_traffic: Whether to allow in project network traffic - -""" - -import multiprocessing -import os -import random -import shutil -import subprocess -import sys -import tempfile -import time -import uuid -from xml.dom import minidom -from xml.etree import ElementTree - -from eventlet import greenthread -from eventlet import tpool - -import IPy - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils -from nova import vnc -from nova.auth import manager -from nova.compute import instance_types -from nova.compute import power_state -from nova.virt import disk -from nova.virt import driver -from nova.virt import images - -libvirt = None -libxml2 = None -Template = None - -LOG = logging.getLogger('nova.virt.libvirt_conn') - -FLAGS = flags.FLAGS -flags.DECLARE('live_migration_retry_count', 'nova.compute.manager') -# TODO(vish): These flags should probably go into a shared location -flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') -flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') -flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') - -flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt.xml.template'), - 'Libvirt XML Template') -flags.DEFINE_string('libvirt_type', - 'kvm', - 'Libvirt domain type (valid options are: ' - 'kvm, lxc, qemu, uml, xen)') -flags.DEFINE_string('libvirt_uri', - '', - 'Override the default libvirt URI (which is dependent' - ' on libvirt_type)') -flags.DEFINE_bool('allow_project_net_traffic', - True, - 'Whether to allow in project network traffic') -flags.DEFINE_bool('use_cow_images', - True, - 'Whether to use cow images') -flags.DEFINE_string('ajaxterm_portrange', - '10000-12000', - 'Range of ports that ajaxterm should randomly try to bind') -flags.DEFINE_string('firewall_driver', - 'nova.virt.libvirt_conn.IptablesFirewallDriver', - 'Firewall driver (defaults to iptables)') -flags.DEFINE_string('cpuinfo_xml_template', - utils.abspath('virt/cpuinfo.xml.template'), - 'CpuInfo XML Template (Used only live migration now)') -flags.DEFINE_string('live_migration_uri', - "qemu+tcp://%s/system", - 'Define protocol used by live_migration feature') -flags.DEFINE_string('live_migration_flag', - "VIR_MIGRATE_UNDEFINE_SOURCE, 
VIR_MIGRATE_PEER2PEER", - 'Define live migration behavior.') -flags.DEFINE_integer('live_migration_bandwidth', 0, - 'Define live migration behavior') -flags.DEFINE_string('qemu_img', 'qemu-img', - 'binary to use for qemu-img commands') -flags.DEFINE_bool('start_guests_on_host_boot', False, - 'Whether to restart guests when the host reboots') - - -def get_connection(read_only): - # These are loaded late so that there's no need to install these - # libraries when not using libvirt. - # Cheetah is separate because the unit tests want to load Cheetah, - # but not libvirt. - global libvirt - global libxml2 - if libvirt is None: - libvirt = __import__('libvirt') - if libxml2 is None: - libxml2 = __import__('libxml2') - _late_load_cheetah() - return LibvirtConnection(read_only) - - -def _late_load_cheetah(): - global Template - if Template is None: - t = __import__('Cheetah.Template', globals(), locals(), - ['Template'], -1) - Template = t.Template - - -def _get_net_and_mask(cidr): - net = IPy.IP(cidr) - return str(net.net()), str(net.netmask()) - - -def _get_net_and_prefixlen(cidr): - net = IPy.IP(cidr) - return str(net.net()), str(net.prefixlen()) - - -def _get_ip_version(cidr): - net = IPy.IP(cidr) - return int(net.version()) - - -def _get_network_info(instance): - # TODO(adiantum) If we will keep this function - # we should cache network_info - admin_context = context.get_admin_context() - - ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, - instance['id']) - networks = db.network_get_all_by_instance(admin_context, - instance['id']) - flavor = db.instance_type_get_by_id(admin_context, - instance['instance_type_id']) - network_info = [] - - for network in networks: - network_ips = [ip for ip in ip_addresses - if ip['network_id'] == network['id']] - - def ip_dict(ip): - return { - 'ip': ip['address'], - 'netmask': network['netmask'], - 'enabled': '1'} - - def ip6_dict(): - prefix = network['cidr_v6'] - mac = instance['mac_address'] - return { - 'ip': utils.to_global_ipv6(prefix, mac), - 'netmask': network['netmask_v6'], - 'enabled': '1'} - - mapping = { - 'label': network['label'], - 'gateway': network['gateway'], - 'broadcast': network['broadcast'], - 'mac': instance['mac_address'], - 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [network['dns']], - 'ips': [ip_dict(ip) for ip in network_ips]} - - if FLAGS.use_ipv6: - mapping['ip6s'] = [ip6_dict()] - mapping['gateway6'] = network['gateway_v6'] - - network_info.append((network, mapping)) - return network_info - - -class LibvirtConnection(driver.ComputeDriver): - - def __init__(self, read_only): - super(LibvirtConnection, self).__init__() - self.libvirt_uri = self.get_uri() - - self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() - self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() - self._wrapped_conn = None - self.read_only = read_only - - fw_class = utils.import_class(FLAGS.firewall_driver) - self.firewall_driver = fw_class(get_connection=self._get_connection) - - def init_host(self, host): - # Adopt existing VM's running here - ctxt = context.get_admin_context() - for instance in db.instance_get_all_by_host(ctxt, host): - try: - LOG.debug(_('Checking state of %s'), instance['name']) - state = self.get_info(instance['name'])['state'] - except exception.NotFound: - state = power_state.SHUTOFF - - LOG.debug(_('Current state of %(name)s was %(state)s.'), - {'name': instance['name'], 'state': state}) - db.instance_set_state(ctxt, instance['id'], state) - - # NOTE(justinsb): We no longer delete SHUTOFF instances, - # the user 
may want to power them back on - - if state != power_state.RUNNING: - continue - self.firewall_driver.prepare_instance_filter(instance) - self.firewall_driver.apply_instance_filter(instance) - - def _get_connection(self): - if not self._wrapped_conn or not self._test_connection(): - LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri) - self._wrapped_conn = self._connect(self.libvirt_uri, - self.read_only) - return self._wrapped_conn - _conn = property(_get_connection) - - def _test_connection(self): - try: - self._wrapped_conn.getInfo() - return True - except libvirt.libvirtError as e: - if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ - e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - LOG.debug(_('Connection to libvirt broke')) - return False - raise - - def get_uri(self): - if FLAGS.libvirt_type == 'uml': - uri = FLAGS.libvirt_uri or 'uml:///system' - elif FLAGS.libvirt_type == 'xen': - uri = FLAGS.libvirt_uri or 'xen:///' - elif FLAGS.libvirt_type == 'lxc': - uri = FLAGS.libvirt_uri or 'lxc:///' - else: - uri = FLAGS.libvirt_uri or 'qemu:///system' - return uri - - def _connect(self, uri, read_only): - auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], - 'root', - None] - - if read_only: - return libvirt.openReadOnly(uri) - else: - return libvirt.openAuth(uri, auth, 0) - - def list_instances(self): - return [self._conn.lookupByID(x).name() - for x in self._conn.listDomainsID()] - - def _map_to_instance_info(self, domain): - """Gets info from a virsh domain object into an InstanceInfo""" - - # domain.info() returns a list of: - # state: one of the state values (virDomainState) - # maxMemory: the maximum memory used by the domain - # memory: the current amount of memory used by the domain - # nbVirtCPU: the number of virtual CPU - # puTime: the time used by the domain in nanoseconds - - (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info() - name = domain.name() - - return driver.InstanceInfo(name, state) - - def list_instances_detail(self): - infos = [] - for domain_id in self._conn.listDomainsID(): - domain = self._conn.lookupByID(domain_id) - info = self._map_to_instance_info(domain) - infos.append(info) - return infos - - def destroy(self, instance, cleanup=True): - instance_name = instance['name'] - - try: - virt_dom = self._lookup_by_name(instance_name) - except exception.NotFound: - virt_dom = None - - # If the instance is already terminated, we're still happy - # Otherwise, destroy it - if virt_dom is not None: - try: - virt_dom.destroy() - except libvirt.libvirtError as e: - is_okay = False - errcode = e.get_error_code() - if errcode == libvirt.VIR_ERR_OPERATION_INVALID: - # If the instance if already shut off, we get this: - # Code=55 Error=Requested operation is not valid: - # domain is not running - (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() - if state == power_state.SHUTOFF: - is_okay = True - - if not is_okay: - LOG.warning(_("Error from libvirt during destroy of " - "%(instance_name)s. Code=%(errcode)s " - "Error=%(e)s") % - locals()) - raise - - try: - # NOTE(justinsb): We remove the domain definition. We probably - # would do better to keep it if cleanup=False (e.g. volumes?) - # (e.g. #2 - not losing machines on failure) - virt_dom.undefine() - except libvirt.libvirtError as e: - errcode = e.get_error_code() - LOG.warning(_("Error from libvirt during undefine of " - "%(instance_name)s. 
Code=%(errcode)s " - "Error=%(e)s") % - locals()) - raise - - def _wait_for_destroy(): - """Called at an interval until the VM is gone.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("Instance %s destroyed successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone - - timer = utils.LoopingCall(_wait_for_destroy) - timer.start(interval=0.5, now=True) - - self.firewall_driver.unfilter_instance(instance) - - if cleanup: - self._cleanup(instance) - - return True - - def _cleanup(self, instance): - target = os.path.join(FLAGS.instances_path, instance['name']) - instance_name = instance['name'] - LOG.info(_('instance %(instance_name)s: deleting instance files' - ' %(target)s') % locals()) - if FLAGS.libvirt_type == 'lxc': - disk.destroy_container(target, instance, nbd=FLAGS.use_cow_images) - if os.path.exists(target): - shutil.rmtree(target) - - @exception.wrap_exception - def attach_volume(self, instance_name, device_path, mountpoint): - virt_dom = self._lookup_by_name(instance_name) - mount_device = mountpoint.rpartition("/")[2] - if device_path.startswith('/dev/'): - xml = """ - - - - """ % (device_path, mount_device) - elif ':' in device_path: - (protocol, name) = device_path.split(':') - xml = """ - - - - """ % (protocol, - name, - mount_device) - else: - raise exception.Invalid(_("Invalid device path %s") % device_path) - - virt_dom.attachDevice(xml) - - def _get_disk_xml(self, xml, device): - """Returns the xml for the disk mounted at device""" - try: - doc = libxml2.parseDoc(xml) - except: - return None - ctx = doc.xpathNewContext() - try: - ret = ctx.xpathEval('/domain/devices/disk') - for node in ret: - for child in node.children: - if child.name == 'target': - if child.prop('dev') == device: - return str(node) - finally: - if ctx is not None: - ctx.xpathFreeContext() - if doc is not None: - doc.freeDoc() - - @exception.wrap_exception - def detach_volume(self, instance_name, mountpoint): - virt_dom = self._lookup_by_name(instance_name) - mount_device = mountpoint.rpartition("/")[2] - xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) - if not xml: - raise exception.NotFound(_("No disk at %s") % mount_device) - virt_dom.detachDevice(xml) - - @exception.wrap_exception - def snapshot(self, instance, image_id): - """Create snapshot from a running VM instance. - - This command only works with qemu 0.14+, the qemu_img flag is - provided so that a locally compiled binary of qemu-img can be used - to support this command. 
- - """ - image_service = utils.import_object(FLAGS.image_service) - virt_dom = self._lookup_by_name(instance['name']) - elevated = context.get_admin_context() - - base = image_service.show(elevated, instance['image_id']) - - metadata = {'disk_format': base['disk_format'], - 'container_format': base['container_format'], - 'is_public': False, - 'name': '%s.%s' % (base['name'], image_id), - 'properties': {'architecture': base['architecture'], - 'kernel_id': instance['kernel_id'], - 'image_location': 'snapshot', - 'image_state': 'available', - 'owner_id': instance['project_id'], - 'ramdisk_id': instance['ramdisk_id'], - } - } - - # Make the snapshot - snapshot_name = uuid.uuid4().hex - snapshot_xml = """ - - %s - - """ % snapshot_name - snapshot_ptr = virt_dom.snapshotCreateXML(snapshot_xml, 0) - - # Find the disk - xml_desc = virt_dom.XMLDesc(0) - domain = ElementTree.fromstring(xml_desc) - source = domain.find('devices/disk/source') - disk_path = source.get('file') - - # Export the snapshot to a raw image - temp_dir = tempfile.mkdtemp() - out_path = os.path.join(temp_dir, snapshot_name) - qemu_img_cmd = (FLAGS.qemu_img, - 'convert', - '-f', - 'qcow2', - '-O', - 'raw', - '-s', - snapshot_name, - disk_path, - out_path) - utils.execute(*qemu_img_cmd) - - # Upload that image to the image service - with open(out_path) as image_file: - image_service.update(elevated, - image_id, - metadata, - image_file) - - # Clean up - shutil.rmtree(temp_dir) - - @exception.wrap_exception - def reboot(self, instance): - """Reboot a virtual machine, given an instance reference. - - This method actually destroys and re-creates the domain to ensure the - reboot happens, as the guest OS cannot ignore this action. - - """ - self.destroy(instance, False) - xml = self.to_xml(instance) - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) - self._create_new_domain(xml) - self.firewall_driver.apply_instance_filter(instance) - - def _wait_for_reboot(): - """Called at an interval until the VM is running again.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("During reboot, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - - if state == power_state.RUNNING: - msg = _("Instance %s rebooted successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone - - timer = utils.LoopingCall(_wait_for_reboot) - return timer.start(interval=0.5, now=True) - - @exception.wrap_exception - def pause(self, instance, callback): - raise exception.ApiError("pause not supported for libvirt.") - - @exception.wrap_exception - def unpause(self, instance, callback): - raise exception.ApiError("unpause not supported for libvirt.") - - @exception.wrap_exception - def suspend(self, instance, callback): - raise exception.ApiError("suspend not supported for libvirt") - - @exception.wrap_exception - def resume(self, instance, callback): - raise exception.ApiError("resume not supported for libvirt") - - @exception.wrap_exception - def rescue(self, instance): - """Loads a VM using rescue images. - - A rescue is normally performed when something goes wrong with the - primary images and data needs to be corrected/recovered. Rescuing - should not edit or over-ride the original image, only allow for - data recovery. 
- - """ - self.destroy(instance, False) - - xml = self.to_xml(instance, rescue=True) - rescue_images = {'image_id': FLAGS.rescue_image_id, - 'kernel_id': FLAGS.rescue_kernel_id, - 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(instance, xml, '.rescue', rescue_images) - self._create_new_domain(xml) - - def _wait_for_rescue(): - """Called at an interval until the VM is running again.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("During reboot, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - - if state == power_state.RUNNING: - msg = _("Instance %s rescued successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone - - timer = utils.LoopingCall(_wait_for_rescue) - return timer.start(interval=0.5, now=True) - - @exception.wrap_exception - def unrescue(self, instance): - """Reboot the VM which is being rescued back into primary images. - - Because reboot destroys and re-creates instances, unresue should - simply call reboot. - - """ - self.reboot(instance) - - @exception.wrap_exception - def poll_rescued_instances(self, timeout): - pass - - # NOTE(ilyaalekseyev): Implementation like in multinics - # for xenapi(tr3buchet) - @exception.wrap_exception - def spawn(self, instance, network_info=None): - xml = self.to_xml(instance, False, network_info) - self.firewall_driver.setup_basic_filtering(instance, network_info) - self.firewall_driver.prepare_instance_filter(instance, network_info) - self._create_image(instance, xml, network_info) - domain = self._create_new_domain(xml) - LOG.debug(_("instance %s: is running"), instance['name']) - self.firewall_driver.apply_instance_filter(instance) - - if FLAGS.start_guests_on_host_boot: - LOG.debug(_("instance %s: setting autostart ON") % - instance['name']) - domain.setAutostart(1) - - def _wait_for_boot(): - """Called at an interval until the VM is running.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("During reboot, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - - if state == power_state.RUNNING: - msg = _("Instance %s spawned successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone - - timer = utils.LoopingCall(_wait_for_boot) - return timer.start(interval=0.5, now=True) - - def _flush_xen_console(self, virsh_output): - LOG.info(_('virsh said: %r'), virsh_output) - virsh_output = virsh_output[0].strip() - - if virsh_output.startswith('/dev/'): - LOG.info(_("cool, it's a device")) - out, err = utils.execute('sudo', 'dd', - "if=%s" % virsh_output, - 'iflag=nonblock', - check_exit_code=False) - return out - else: - return '' - - def _append_to_file(self, data, fpath): - LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals()) - fp = open(fpath, 'a+') - fp.write(data) - return fpath - - def _dump_file(self, fpath): - fp = open(fpath, 'r+') - contents = fp.read() - LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals()) - return contents - - @exception.wrap_exception - def get_console_output(self, instance): - console_log = os.path.join(FLAGS.instances_path, instance['name'], - 'console.log') - - utils.execute('sudo', 'chown', os.getuid(), console_log) - - if FLAGS.libvirt_type == 'xen': - # Xen is special - virsh_output = utils.execute('virsh', 'ttyconsole', - instance['name']) - data = self._flush_xen_console(virsh_output) - fpath = 
self._append_to_file(data, console_log) - elif FLAGS.libvirt_type == 'lxc': - # LXC is also special - LOG.info(_("Unable to read LXC console")) - else: - fpath = console_log - - return self._dump_file(fpath) - - @exception.wrap_exception - def get_ajax_console(self, instance): - def get_open_port(): - start_port, end_port = FLAGS.ajaxterm_portrange.split("-") - for i in xrange(0, 100): # don't loop forever - port = random.randint(int(start_port), int(end_port)) - # netcat will exit with 0 only if the port is in use, - # so a nonzero return value implies it is unused - cmd = 'netcat', '0.0.0.0', port, '-w', '1' - try: - stdout, stderr = utils.execute(*cmd, process_input='') - except exception.ProcessExecutionError: - return port - raise Exception(_('Unable to find an open port')) - - def get_pty_for_instance(instance_name): - virt_dom = self._lookup_by_name(instance_name) - xml = virt_dom.XMLDesc(0) - dom = minidom.parseString(xml) - - for serial in dom.getElementsByTagName('serial'): - if serial.getAttribute('type') == 'pty': - source = serial.getElementsByTagName('source')[0] - return source.getAttribute('path') - - port = get_open_port() - token = str(uuid.uuid4()) - host = instance['host'] - - ajaxterm_cmd = 'sudo socat - %s' \ - % get_pty_for_instance(instance['name']) - - cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \ - % (utils.novadir(), ajaxterm_cmd, token, port) - - subprocess.Popen(cmd, shell=True) - return {'token': token, 'host': host, 'port': port} - - @exception.wrap_exception - def get_vnc_console(self, instance): - def get_vnc_port_for_instance(instance_name): - virt_dom = self._lookup_by_name(instance_name) - xml = virt_dom.XMLDesc(0) - # TODO: use etree instead of minidom - dom = minidom.parseString(xml) - - for graphic in dom.getElementsByTagName('graphics'): - if graphic.getAttribute('type') == 'vnc': - return graphic.getAttribute('port') - - port = get_vnc_port_for_instance(instance['name']) - token = str(uuid.uuid4()) - host = instance['host'] - - return {'token': token, 'host': host, 'port': port} - - @staticmethod - def _cache_image(fn, target, fname, cow=False, *args, **kwargs): - """Wrapper for a method that creates an image that caches the image. - - This wrapper will save the image into a common store and create a - copy for use by the hypervisor. - - The underlying method should specify a kwarg of target representing - where the image will be saved. - - fname is used as the filename of the base image. The filename needs - to be unique to a given image. - - If cow is True, it will make a CoW image instead of a copy. 
- """ - if not os.path.exists(target): - base_dir = os.path.join(FLAGS.instances_path, '_base') - if not os.path.exists(base_dir): - os.mkdir(base_dir) - base = os.path.join(base_dir, fname) - - @utils.synchronized(fname) - def call_if_not_exists(base, fn, *args, **kwargs): - if not os.path.exists(base): - fn(target=base, *args, **kwargs) - - call_if_not_exists(base, fn, *args, **kwargs) - - if cow: - utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o', - 'cluster_size=2M,backing_file=%s' % base, - target) - else: - utils.execute('cp', base, target) - - def _fetch_image(self, target, image_id, user, project, size=None): - """Grab image and optionally attempt to resize it""" - images.fetch(image_id, target, user, project) - if size: - disk.extend(target, size) - - def _create_local(self, target, local_gb): - """Create a blank image of specified size""" - utils.execute('truncate', target, '-s', "%dG" % local_gb) - # TODO(vish): should we format disk by default? - - def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, - network_info=None): - if not network_info: - network_info = _get_network_info(inst) - - if not suffix: - suffix = '' - - # syntactic nicety - def basepath(fname='', suffix=suffix): - return os.path.join(FLAGS.instances_path, - inst['name'], - fname + suffix) - - # ensure directories exist and are writable - utils.execute('mkdir', '-p', basepath(suffix='')) - - LOG.info(_('instance %s: Creating image'), inst['name']) - f = open(basepath('libvirt.xml'), 'w') - f.write(libvirt_xml) - f.close() - - if FLAGS.libvirt_type == 'lxc': - container_dir = '%s/rootfs' % basepath(suffix='') - utils.execute('mkdir', '-p', container_dir) - - # NOTE(vish): No need add the suffix to console.log - os.close(os.open(basepath('console.log', ''), - os.O_CREAT | os.O_WRONLY, 0660)) - - user = manager.AuthManager().get_user(inst['user_id']) - project = manager.AuthManager().get_project(inst['project_id']) - - if not disk_images: - disk_images = {'image_id': inst['image_id'], - 'kernel_id': inst['kernel_id'], - 'ramdisk_id': inst['ramdisk_id']} - - if disk_images['kernel_id']: - fname = '%08x' % int(disk_images['kernel_id']) - self._cache_image(fn=self._fetch_image, - target=basepath('kernel'), - fname=fname, - image_id=disk_images['kernel_id'], - user=user, - project=project) - if disk_images['ramdisk_id']: - fname = '%08x' % int(disk_images['ramdisk_id']) - self._cache_image(fn=self._fetch_image, - target=basepath('ramdisk'), - fname=fname, - image_id=disk_images['ramdisk_id'], - user=user, - project=project) - - root_fname = '%08x' % int(disk_images['image_id']) - size = FLAGS.minimum_root_size - - inst_type_id = inst['instance_type_id'] - inst_type = instance_types.get_instance_type(inst_type_id) - if inst_type['name'] == 'm1.tiny' or suffix == '.rescue': - size = None - root_fname += "_sm" - - self._cache_image(fn=self._fetch_image, - target=basepath('disk'), - fname=root_fname, - cow=FLAGS.use_cow_images, - image_id=disk_images['image_id'], - user=user, - project=project, - size=size) - - if inst_type['local_gb']: - self._cache_image(fn=self._create_local, - target=basepath('disk.local'), - fname="local_%s" % inst_type['local_gb'], - cow=FLAGS.use_cow_images, - local_gb=inst_type['local_gb']) - - # For now, we assume that if we're not using a kernel, we're using a - # partitioned disk image where the target partition is the first - # partition - target_partition = None - if not inst['kernel_id']: - target_partition = "1" - - if FLAGS.libvirt_type == 'lxc': - 
target_partition = None - - if inst['key_data']: - key = str(inst['key_data']) - else: - key = None - net = None - - nets = [] - ifc_template = open(FLAGS.injected_network_template).read() - ifc_num = -1 - have_injected_networks = False - admin_context = context.get_admin_context() - for (network_ref, mapping) in network_info: - ifc_num += 1 - - if not network_ref['injected']: - continue - - have_injected_networks = True - address = mapping['ips'][0]['ip'] - address_v6 = None - if FLAGS.use_ipv6: - address_v6 = mapping['ip6s'][0]['ip'] - net_info = {'name': 'eth%d' % ifc_num, - 'address': address, - 'netmask': network_ref['netmask'], - 'gateway': network_ref['gateway'], - 'broadcast': network_ref['broadcast'], - 'dns': network_ref['dns'], - 'address_v6': address_v6, - 'gateway_v6': network_ref['gateway_v6'], - 'netmask_v6': network_ref['netmask_v6']} - nets.append(net_info) - - if have_injected_networks: - net = str(Template(ifc_template, - searchList=[{'interfaces': nets, - 'use_ipv6': FLAGS.use_ipv6}])) - - if key or net: - inst_name = inst['name'] - img_id = inst.image_id - if key: - LOG.info(_('instance %(inst_name)s: injecting key into' - ' image %(img_id)s') % locals()) - if net: - LOG.info(_('instance %(inst_name)s: injecting net into' - ' image %(img_id)s') % locals()) - try: - disk.inject_data(basepath('disk'), key, net, - partition=target_partition, - nbd=FLAGS.use_cow_images) - - if FLAGS.libvirt_type == 'lxc': - disk.setup_container(basepath('disk'), - container_dir=container_dir, - nbd=FLAGS.use_cow_images) - except Exception as e: - # This could be a windows image, or a vmdk format disk - LOG.warn(_('instance %(inst_name)s: ignoring error injecting' - ' data into image %(img_id)s (%(e)s)') % locals()) - - if FLAGS.libvirt_type == 'uml': - utils.execute('sudo', 'chown', 'root', basepath('disk')) - - def _get_nic_for_xml(self, network, mapping): - # Assume that the gateway also acts as the dhcp server. - dhcp_server = network['gateway'] - gateway_v6 = network['gateway_v6'] - mac_id = mapping['mac'].replace(':', '') - - if FLAGS.allow_project_net_traffic: - if FLAGS.use_ipv6: - net, mask = _get_net_and_mask(network['cidr']) - net_v6, prefixlen_v6 = _get_net_and_prefixlen( - network['cidr_v6']) - extra_params = ("\n" - "\n" - "\n" - "\n") % \ - (net, mask, net_v6, prefixlen_v6) - else: - net, mask = _get_net_and_mask(network['cidr']) - extra_params = ("\n" - "\n") % \ - (net, mask) - else: - extra_params = "\n" - - result = { - 'id': mac_id, - 'bridge_name': network['bridge'], - 'mac_address': mapping['mac'], - 'ip_address': mapping['ips'][0]['ip'], - 'dhcp_server': dhcp_server, - 'extra_params': extra_params, - } - - if gateway_v6: - result['gateway_v6'] = gateway_v6 + "/128" - - return result - - def to_xml(self, instance, rescue=False, network_info=None): - # TODO(termie): cache? 
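        # (Editor's illustrative aside, not part of the original patch.)
        # to_xml() renders the Cheetah template held in self.libvirt_xml
        # against a plain dict.  A minimal standalone sketch of that idiom,
        # assuming the Cheetah package is importable and using a made-up
        # template string and instance name:
        from Cheetah.Template import Template as _ExampleTemplate
        _example_tmpl = "<domain type='$type'><name>$name</name></domain>"
        _example_xml = str(_ExampleTemplate(
            _example_tmpl,
            searchList=[{'type': 'qemu', 'name': 'instance-00000001'}]))
        # _example_xml is now:
        #   <domain type='qemu'><name>instance-00000001</name></domain>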
- LOG.debug(_('instance %s: starting toXML method'), instance['name']) - - # TODO(adiantum) remove network_info creation code - # when multinics will be completed - if not network_info: - network_info = _get_network_info(instance) - - nics = [] - for (network, mapping) in network_info: - nics.append(self._get_nic_for_xml(network, - mapping)) - # FIXME(vish): stick this in db - inst_type_id = instance['instance_type_id'] - inst_type = instance_types.get_instance_type(inst_type_id) - - if FLAGS.use_cow_images: - driver_type = 'qcow2' - else: - driver_type = 'raw' - - xml_info = {'type': FLAGS.libvirt_type, - 'name': instance['name'], - 'basepath': os.path.join(FLAGS.instances_path, - instance['name']), - 'memory_kb': inst_type['memory_mb'] * 1024, - 'vcpus': inst_type['vcpus'], - 'rescue': rescue, - 'local': inst_type['local_gb'], - 'driver_type': driver_type, - 'nics': nics} - - if FLAGS.vnc_enabled: - if FLAGS.libvirt_type != 'lxc': - xml_info['vncserver_host'] = FLAGS.vncserver_host - if not rescue: - if instance['kernel_id']: - xml_info['kernel'] = xml_info['basepath'] + "/kernel" - - if instance['ramdisk_id']: - xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" - - xml_info['disk'] = xml_info['basepath'] + "/disk" - - xml = str(Template(self.libvirt_xml, searchList=[xml_info])) - LOG.debug(_('instance %s: finished toXML method'), - instance['name']) - return xml - - def _lookup_by_name(self, instance_name): - """Retrieve libvirt domain object given an instance name. - - All libvirt error handling should be handled in this method and - relevant nova exceptions should be raised in response. - - """ - try: - return self._conn.lookupByName(instance_name) - except libvirt.libvirtError as ex: - error_code = ex.get_error_code() - if error_code == libvirt.VIR_ERR_NO_DOMAIN: - msg = _("Instance %s not found") % instance_name - raise exception.NotFound(msg) - - msg = _("Error from libvirt while looking up %(instance_name)s: " - "[Error Code %(error_code)s] %(ex)s") % locals() - raise exception.Error(msg) - - def get_info(self, instance_name): - """Retrieve information from libvirt for a specific instance name. - - If a libvirt error is encountered during lookup, we might raise a - NotFound exception or Error exception depending on how severe the - libvirt error is. - - """ - virt_dom = self._lookup_by_name(instance_name) - (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() - return {'state': state, - 'max_mem': max_mem, - 'mem': mem, - 'num_cpu': num_cpu, - 'cpu_time': cpu_time} - - def _create_new_domain(self, xml, persistent=True, launch_flags=0): - # NOTE(justinsb): libvirt has two types of domain: - # * a transient domain disappears when the guest is shutdown - # or the host is rebooted. - # * a permanent domain is not automatically deleted - # NOTE(justinsb): Even for ephemeral instances, transient seems risky - - if persistent: - # To create a persistent domain, first define it, then launch it. - domain = self._conn.defineXML(xml) - - domain.createWithFlags(launch_flags) - else: - # createXML call creates a transient domain - domain = self._conn.createXML(xml, launch_flags) - - return domain - - def get_diagnostics(self, instance_name): - raise exception.ApiError(_("diagnostics are not supported " - "for libvirt")) - - def get_disks(self, instance_name): - """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. - - Returns a list of all block devices for this domain. 
- """ - domain = self._lookup_by_name(instance_name) - # TODO(devcamcar): Replace libxml2 with etree. - xml = domain.XMLDesc(0) - doc = None - - try: - doc = libxml2.parseDoc(xml) - except: - return [] - - ctx = doc.xpathNewContext() - disks = [] - - try: - ret = ctx.xpathEval('/domain/devices/disk') - - for node in ret: - devdst = None - - for child in node.children: - if child.name == 'target': - devdst = child.prop('dev') - - if devdst is None: - continue - - disks.append(devdst) - finally: - if ctx is not None: - ctx.xpathFreeContext() - if doc is not None: - doc.freeDoc() - - return disks - - def get_interfaces(self, instance_name): - """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. - - Returns a list of all network interfaces for this instance. - """ - domain = self._lookup_by_name(instance_name) - # TODO(devcamcar): Replace libxml2 with etree. - xml = domain.XMLDesc(0) - doc = None - - try: - doc = libxml2.parseDoc(xml) - except: - return [] - - ctx = doc.xpathNewContext() - interfaces = [] - - try: - ret = ctx.xpathEval('/domain/devices/interface') - - for node in ret: - devdst = None - - for child in node.children: - if child.name == 'target': - devdst = child.prop('dev') - - if devdst is None: - continue - - interfaces.append(devdst) - finally: - if ctx is not None: - ctx.xpathFreeContext() - if doc is not None: - doc.freeDoc() - - return interfaces - - def get_vcpu_total(self): - """Get vcpu number of physical computer. - - :returns: the number of cpu core. - - """ - - # On certain platforms, this will raise a NotImplementedError. - try: - return multiprocessing.cpu_count() - except NotImplementedError: - LOG.warn(_("Cannot get the number of cpu, because this " - "function is not implemented for this platform. " - "This error can be safely ignored for now.")) - return 0 - - def get_memory_mb_total(self): - """Get the total memory size(MB) of physical computer. - - :returns: the total amount of memory(MB). - - """ - - if sys.platform.upper() != 'LINUX2': - return 0 - - meminfo = open('/proc/meminfo').read().split() - idx = meminfo.index('MemTotal:') - # transforming kb to mb. - return int(meminfo[idx + 1]) / 1024 - - def get_local_gb_total(self): - """Get the total hdd size(GB) of physical computer. - - :returns: - The total amount of HDD(GB). - Note that this value shows a partition where - NOVA-INST-DIR/instances mounts. - - """ - - hddinfo = os.statvfs(FLAGS.instances_path) - return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024 - - def get_vcpu_used(self): - """ Get vcpu usage number of physical computer. - - :returns: The total number of vcpu that currently used. - - """ - - total = 0 - for dom_id in self._conn.listDomainsID(): - dom = self._conn.lookupByID(dom_id) - total += len(dom.vcpus()[1]) - return total - - def get_memory_mb_used(self): - """Get the free memory size(MB) of physical computer. - - :returns: the total usage of memory(MB). - - """ - - if sys.platform.upper() != 'LINUX2': - return 0 - - m = open('/proc/meminfo').read().split() - idx1 = m.index('MemFree:') - idx2 = m.index('Buffers:') - idx3 = m.index('Cached:') - avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024 - return self.get_memory_mb_total() - avail - - def get_local_gb_used(self): - """Get the free hdd size(GB) of physical computer. - - :returns: - The total usage of HDD(GB). - Note that this value shows a partition where - NOVA-INST-DIR/instances mounts. 
- - """ - - hddinfo = os.statvfs(FLAGS.instances_path) - avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024 - return self.get_local_gb_total() - avail - - def get_hypervisor_type(self): - """Get hypervisor type. - - :returns: hypervisor type (ex. qemu) - - """ - - return self._conn.getType() - - def get_hypervisor_version(self): - """Get hypervisor version. - - :returns: hypervisor version (ex. 12003) - - """ - - # NOTE(justinsb): getVersion moved between libvirt versions - # Trying to do be compatible with older versions is a lost cause - # But ... we can at least give the user a nice message - method = getattr(self._conn, 'getVersion', None) - if method is None: - raise exception.Error(_("libvirt version is too old" - " (does not support getVersion)")) - # NOTE(justinsb): If we wanted to get the version, we could: - # method = getattr(libvirt, 'getVersion', None) - # NOTE(justinsb): This would then rely on a proper version check - - return method() - - def get_cpu_info(self): - """Get cpuinfo information. - - Obtains cpu feature from virConnect.getCapabilities, - and returns as a json string. - - :return: see above description - - """ - - xml = self._conn.getCapabilities() - xml = libxml2.parseDoc(xml) - nodes = xml.xpathEval('//host/cpu') - if len(nodes) != 1: - raise exception.Invalid(_("Invalid xml. '' must be 1," - "but %d\n") % len(nodes) - + xml.serialize()) - - cpu_info = dict() - - arch_nodes = xml.xpathEval('//host/cpu/arch') - if arch_nodes: - cpu_info['arch'] = arch_nodes[0].getContent() - - model_nodes = xml.xpathEval('//host/cpu/model') - if model_nodes: - cpu_info['model'] = model_nodes[0].getContent() - - vendor_nodes = xml.xpathEval('//host/cpu/vendor') - if vendor_nodes: - cpu_info['vendor'] = vendor_nodes[0].getContent() - - topology_nodes = xml.xpathEval('//host/cpu/topology') - topology = dict() - if topology_nodes: - topology_node = topology_nodes[0].get_properties() - while topology_node: - name = topology_node.get_name() - topology[name] = topology_node.getContent() - topology_node = topology_node.get_next() - - keys = ['cores', 'sockets', 'threads'] - tkeys = topology.keys() - if set(tkeys) != set(keys): - ks = ', '.join(keys) - raise exception.Invalid(_("Invalid xml: topology" - "(%(topology)s) must have " - "%(ks)s") % locals()) - - feature_nodes = xml.xpathEval('//host/cpu/feature') - features = list() - for nodes in feature_nodes: - features.append(nodes.get_properties().getContent()) - - cpu_info['topology'] = topology - cpu_info['features'] = features - return utils.dumps(cpu_info) - - def block_stats(self, instance_name, disk): - """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. - """ - domain = self._lookup_by_name(instance_name) - return domain.blockStats(disk) - - def interface_stats(self, instance_name, interface): - """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. - """ - domain = self._lookup_by_name(instance_name) - return domain.interfaceStats(interface) - - def get_console_pool_info(self, console_type): - #TODO(mdragon): console proxy should be implemented for libvirt, - # in case someone wants to use it with kvm or - # such. For now return fake data. 
- return {'address': '127.0.0.1', - 'username': 'fakeuser', - 'password': 'fakepassword'} - - def refresh_security_group_rules(self, security_group_id): - self.firewall_driver.refresh_security_group_rules(security_group_id) - - def refresh_security_group_members(self, security_group_id): - self.firewall_driver.refresh_security_group_members(security_group_id) - - def update_available_resource(self, ctxt, host): - """Updates compute manager resource info on ComputeNode table. - - This method is called when nova-coompute launches, and - whenever admin executes "nova-manage service update_resource". - - :param ctxt: security context - :param host: hostname that compute manager is currently running - - """ - - try: - service_ref = db.service_get_all_compute_by_host(ctxt, host)[0] - except exception.NotFound: - raise exception.Invalid(_("Cannot update compute manager " - "specific info, because no service " - "record was found.")) - - # Updating host information - dic = {'vcpus': self.get_vcpu_total(), - 'memory_mb': self.get_memory_mb_total(), - 'local_gb': self.get_local_gb_total(), - 'vcpus_used': self.get_vcpu_used(), - 'memory_mb_used': self.get_memory_mb_used(), - 'local_gb_used': self.get_local_gb_used(), - 'hypervisor_type': self.get_hypervisor_type(), - 'hypervisor_version': self.get_hypervisor_version(), - 'cpu_info': self.get_cpu_info()} - - compute_node_ref = service_ref['compute_node'] - if not compute_node_ref: - LOG.info(_('Compute_service record created for %s ') % host) - dic['service_id'] = service_ref['id'] - db.compute_node_create(ctxt, dic) - else: - LOG.info(_('Compute_service record updated for %s ') % host) - db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic) - - def compare_cpu(self, cpu_info): - """Checks the host cpu is compatible to a cpu given by xml. - - "xml" must be a part of libvirt.openReadonly().getCapabilities(). - return values follows by virCPUCompareResult. - if 0 > return value, do live migration. - 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' - - :param cpu_info: json string that shows cpu feature(see get_cpu_info()) - :returns: - None. if given cpu info is not compatible to this server, - raise exception. - - """ - - LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info) - dic = utils.loads(cpu_info) - xml = str(Template(self.cpuinfo_xml, searchList=dic)) - LOG.info(_('to xml...\n:%s ' % xml)) - - u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult" - m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s") - # unknown character exists in xml, then libvirt complains - try: - ret = self._conn.compareCPU(xml, 0) - except libvirt.libvirtError, e: - ret = e.message - LOG.error(m % locals()) - raise - - if ret <= 0: - raise exception.Invalid(m % locals()) - - return - - def ensure_filtering_rules_for_instance(self, instance_ref, - time=None): - """Setting up filtering rules and waiting for its completion. - - To migrate an instance, filtering rules to hypervisors - and firewalls are inevitable on destination host. - ( Waiting only for filterling rules to hypervisor, - since filtering rules to firewall rules can be set faster). - - Concretely, the below method must be called. - - setup_basic_filtering (for nova-basic, etc.) - - prepare_instance_filter(for nova-instance-instance-xxx, etc.) - - to_xml may have to be called since it defines PROJNET, PROJMASK. - but libvirt migrates those value through migrateToURI(), - so , no need to be called. 
- - Don't use thread for this method since migration should - not be started when setting-up filtering rules operations - are not completed. - - :params instance_ref: nova.db.sqlalchemy.models.Instance object - - """ - - if not time: - time = greenthread - - # If any instances never launch at destination host, - # basic-filtering must be set here. - self.firewall_driver.setup_basic_filtering(instance_ref) - # setting up n)ova-instance-instance-xx mainly. - self.firewall_driver.prepare_instance_filter(instance_ref) - - # wait for completion - timeout_count = range(FLAGS.live_migration_retry_count) - while timeout_count: - if self.firewall_driver.instance_filter_exists(instance_ref): - break - timeout_count.pop() - if len(timeout_count) == 0: - msg = _('Timeout migrating for %s. nwfilter not found.') - raise exception.Error(msg % instance_ref.name) - time.sleep(1) - - def live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): - """Spawning live_migration operation for distributing high-load. - - :params ctxt: security context - :params instance_ref: - nova.db.sqlalchemy.models.Instance object - instance object that is migrated. - :params dest: destination host - :params post_method: - post operation method. - expected nova.compute.manager.post_live_migration. - :params recover_method: - recovery method when any exception occurs. - expected nova.compute.manager.recover_live_migration. - - """ - - greenthread.spawn(self._live_migration, ctxt, instance_ref, dest, - post_method, recover_method) - - def _live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): - """Do live migration. - - :params ctxt: security context - :params instance_ref: - nova.db.sqlalchemy.models.Instance object - instance object that is migrated. - :params dest: destination host - :params post_method: - post operation method. - expected nova.compute.manager.post_live_migration. - :params recover_method: - recovery method when any exception occurs. - expected nova.compute.manager.recover_live_migration. - - """ - - # Do live migration. - try: - flaglist = FLAGS.live_migration_flag.split(',') - flagvals = [getattr(libvirt, x.strip()) for x in flaglist] - logical_sum = reduce(lambda x, y: x | y, flagvals) - - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance_ref.name) - dom.migrateToURI(FLAGS.live_migration_uri % dest, - logical_sum, - None, - FLAGS.live_migration_bandwidth) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance_ref.name) - dom.migrateToURI(FLAGS.live_migration_uri % dest, - logical_sum, - None, - FLAGS.live_migration_bandwidth) - - except Exception: - recover_method(ctxt, instance_ref, dest=dest) - raise - - # Waiting for completion of live_migration. - timer = utils.LoopingCall(f=None) - - def wait_for_live_migration(): - """waiting for live migration completion""" - try: - self.get_info(instance_ref.name)['state'] - except exception.NotFound: - timer.stop() - post_method(ctxt, instance_ref, dest) - - timer.f = wait_for_live_migration - timer.start(interval=0.5, now=True) - - def unfilter_instance(self, instance_ref): - """See comments of same method in firewall_driver.""" - self.firewall_driver.unfilter_instance(instance_ref) - - -class FirewallDriver(object): - def prepare_instance_filter(self, instance, network_info=None): - """Prepare filters for the instance. 
- - At this point, the instance isn't running yet.""" - raise NotImplementedError() - - def unfilter_instance(self, instance): - """Stop filtering instance""" - raise NotImplementedError() - - def apply_instance_filter(self, instance): - """Apply instance filter. - - Once this method returns, the instance should be firewalled - appropriately. This method should as far as possible be a - no-op. It's vastly preferred to get everything set up in - prepare_instance_filter. - """ - raise NotImplementedError() - - def refresh_security_group_rules(self, security_group_id): - """Refresh security group rules from data store - - Gets called when a rule has been added to or removed from - the security group.""" - raise NotImplementedError() - - def refresh_security_group_members(self, security_group_id): - """Refresh security group members from data store - - Gets called when an instance gets added to or removed from - the security group.""" - raise NotImplementedError() - - def setup_basic_filtering(self, instance, network_info=None): - """Create rules to block spoofing and allow dhcp. - - This gets called when spawning an instance, before - :method:`prepare_instance_filter`. - - """ - raise NotImplementedError() - - def instance_filter_exists(self, instance): - """Check nova-instance-instance-xxx exists""" - raise NotImplementedError() - - -class NWFilterFirewall(FirewallDriver): - """ - This class implements a network filtering mechanism versatile - enough for EC2 style Security Group filtering by leveraging - libvirt's nwfilter. - - First, all instances get a filter ("nova-base-filter") applied. - This filter provides some basic security such as protection against - MAC spoofing, IP spoofing, and ARP spoofing. - - This filter drops all incoming ipv4 and ipv6 connections. - Outgoing connections are never blocked. - - Second, every security group maps to a nwfilter filter(*). - NWFilters can be updated at runtime and changes are applied - immediately, so changes to security groups can be applied at - runtime (as mandated by the spec). - - Security group rules are named "nova-secgroup-" where - is the internal id of the security group. They're applied only on - hosts that have instances in the security group in question. - - Updates to security groups are done by updating the data model - (in response to API calls) followed by a request sent to all - the nodes with instances in the security group to refresh the - security group. - - Each instance has its own NWFilter, which references the above - mentioned security group NWFilters. This was done because - interfaces can only reference one filter while filters can - reference multiple other filters. This has the added benefit of - actually being able to add and remove security groups from an - instance at run time. This functionality is not exposed anywhere, - though. - - Outstanding questions: - - The name is unique, so would there be any good reason to sync - the uuid across the nodes (by assigning it from the datamodel)? - - - (*) This sentence brought to you by the redundancy department of - redundancy. - - """ - - def __init__(self, get_connection, **kwargs): - self._libvirt_get_connection = get_connection - self.static_filters_configured = False - self.handle_security_groups = False - - def apply_instance_filter(self, instance): - """No-op. 
Everything is done in prepare_instance_filter""" - pass - - def _get_connection(self): - return self._libvirt_get_connection() - _conn = property(_get_connection) - - def nova_dhcp_filter(self): - """The standard allow-dhcp-server filter is an one, so it uses - ebtables to allow traffic through. Without a corresponding rule in - iptables, it'll get blocked anyway.""" - - return ''' - 891e4787-e5c0-d59b-cbd6-41bc3c6b36fc - - - - - - - ''' - - def nova_ra_filter(self): - return ''' - d707fa71-4fb5-4b27-9ab7-ba5ca19c8804 - - - - ''' - - def setup_basic_filtering(self, instance, network_info=None): - """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" - logging.info('called setup_basic_filtering in nwfilter') - - if not network_info: - network_info = _get_network_info(instance) - - if self.handle_security_groups: - # No point in setting up a filter set that we'll be overriding - # anyway. - return - - logging.info('ensuring static filters') - self._ensure_static_filters() - - if instance['image_id'] == str(FLAGS.vpn_image_id): - base_filter = 'nova-vpn' - else: - base_filter = 'nova-base' - - for (network, mapping) in network_info: - nic_id = mapping['mac'].replace(':', '') - instance_filter_name = self._instance_filter_name(instance, nic_id) - self._define_filter(self._filter_container(instance_filter_name, - [base_filter])) - - def _ensure_static_filters(self): - if self.static_filters_configured: - return - - self._define_filter(self._filter_container('nova-base', - ['no-mac-spoofing', - 'no-ip-spoofing', - 'no-arp-spoofing', - 'allow-dhcp-server'])) - self._define_filter(self._filter_container('nova-vpn', - ['allow-dhcp-server'])) - self._define_filter(self.nova_base_ipv4_filter) - self._define_filter(self.nova_base_ipv6_filter) - self._define_filter(self.nova_dhcp_filter) - self._define_filter(self.nova_ra_filter) - if FLAGS.allow_project_net_traffic: - self._define_filter(self.nova_project_filter) - if FLAGS.use_ipv6: - self._define_filter(self.nova_project_filter_v6) - - self.static_filters_configured = True - - def _filter_container(self, name, filters): - xml = '''%s''' % ( - name, - ''.join(["" % (f,) for f in filters])) - return xml - - def nova_base_ipv4_filter(self): - retval = "" - for protocol in ['tcp', 'udp', 'icmp']: - for direction, action, priority in [('out', 'accept', 399), - ('in', 'drop', 400)]: - retval += """ - <%s /> - """ % (action, direction, - priority, protocol) - retval += '' - return retval - - def nova_base_ipv6_filter(self): - retval = "" - for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: - for direction, action, priority in [('out', 'accept', 399), - ('in', 'drop', 400)]: - retval += """ - <%s /> - """ % (action, direction, - priority, protocol) - retval += '' - return retval - - def nova_project_filter(self): - retval = "" - for protocol in ['tcp', 'udp', 'icmp']: - retval += """ - <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' /> - """ % protocol - retval += '' - return retval - - def nova_project_filter_v6(self): - retval = "" - for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: - retval += """ - <%s srcipaddr='$PROJNETV6' - srcipmask='$PROJMASKV6' /> - """ % (protocol) - retval += '' - return retval - - def _define_filter(self, xml): - if callable(xml): - xml = xml() - # execute in a native thread and block current greenthread until done - tpool.execute(self._conn.nwfilterDefineXML, xml) - - def unfilter_instance(self, instance): - # Nothing to do - pass - - def prepare_instance_filter(self, instance, network_info=None): - """ - 
Creates an NWFilter for the given instance. In the process, - it makes sure the filters for the security groups as well as - the base filter are all in place. - """ - if not network_info: - network_info = _get_network_info(instance) - if instance['image_id'] == str(FLAGS.vpn_image_id): - base_filter = 'nova-vpn' - else: - base_filter = 'nova-base' - - ctxt = context.get_admin_context() - - instance_secgroup_filter_name = \ - '%s-secgroup' % (self._instance_filter_name(instance)) - #% (instance_filter_name,) - - instance_secgroup_filter_children = ['nova-base-ipv4', - 'nova-base-ipv6', - 'nova-allow-dhcp-server'] - - for security_group in \ - db.security_group_get_by_instance(ctxt, instance['id']): - - self.refresh_security_group_rules(security_group['id']) - - instance_secgroup_filter_children += [('nova-secgroup-%s' % - security_group['id'])] - - self._define_filter( - self._filter_container(instance_secgroup_filter_name, - instance_secgroup_filter_children)) - - for (network, mapping) in network_info: - nic_id = mapping['mac'].replace(':', '') - instance_filter_name = self._instance_filter_name(instance, nic_id) - instance_filter_children = \ - [base_filter, instance_secgroup_filter_name] - - if FLAGS.use_ipv6: - gateway_v6 = network['gateway_v6'] - - if gateway_v6: - instance_secgroup_filter_children += \ - ['nova-allow-ra-server'] - - if FLAGS.allow_project_net_traffic: - instance_filter_children += ['nova-project'] - if FLAGS.use_ipv6: - instance_filter_children += ['nova-project-v6'] - - self._define_filter( - self._filter_container(instance_filter_name, - instance_filter_children)) - - return - - def refresh_security_group_rules(self, security_group_id): - return self._define_filter( - self.security_group_to_nwfilter_xml(security_group_id)) - - def security_group_to_nwfilter_xml(self, security_group_id): - security_group = db.security_group_get(context.get_admin_context(), - security_group_id) - rule_xml = "" - v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} - for rule in security_group.rules: - rule_xml += "" - if rule.cidr: - version = _get_ip_version(rule.cidr) - if(FLAGS.use_ipv6 and version == 6): - net, prefixlen = _get_net_and_prefixlen(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (v6protocol[rule.protocol], net, prefixlen) - else: - net, mask = _get_net_and_mask(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (rule.protocol, net, mask) - if rule.protocol in ['tcp', 'udp']: - rule_xml += "dstportstart='%s' dstportend='%s' " % \ - (rule.from_port, rule.to_port) - elif rule.protocol == 'icmp': - LOG.info('rule.protocol: %r, rule.from_port: %r, ' - 'rule.to_port: %r', rule.protocol, - rule.from_port, rule.to_port) - if rule.from_port != -1: - rule_xml += "type='%s' " % rule.from_port - if rule.to_port != -1: - rule_xml += "code='%s' " % rule.to_port - - rule_xml += '/>\n' - rule_xml += "\n" - xml = " Date: Fri, 22 Apr 2011 15:26:45 -0400 Subject: Renamed test_virt.py to test_libvirt.py as per suggestion. 
--- nova/tests/test_libvirt.py | 886 +++++++++++++++++++++++++++++++++++++++++++++ nova/tests/test_virt.py | 886 --------------------------------------------- 2 files changed, 886 insertions(+), 886 deletions(-) create mode 100644 nova/tests/test_libvirt.py delete mode 100644 nova/tests/test_virt.py (limited to 'nova') diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py new file mode 100644 index 000000000..fd284c52b --- /dev/null +++ b/nova/tests/test_libvirt.py @@ -0,0 +1,886 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +import mox +import os +import re +import sys + +from xml.etree.ElementTree import fromstring as xml_to_tree +from xml.dom.minidom import parseString as xml_to_dom + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import test +from nova import utils +from nova.api.ec2 import cloud +from nova.auth import manager +from nova.compute import manager as compute_manager +from nova.compute import power_state +from nova.db.sqlalchemy import models +from nova.virt.libvirt import connection +from nova.virt.libvirt import firewall + +libvirt = None +FLAGS = flags.FLAGS +flags.DECLARE('instances_path', 'nova.compute.manager') + + +def _concurrency(wait, done, target): + wait.wait() + done.send() + + +class CacheConcurrencyTestCase(test.TestCase): + def setUp(self): + super(CacheConcurrencyTestCase, self).setUp() + + def fake_exists(fname): + basedir = os.path.join(FLAGS.instances_path, '_base') + if fname == basedir: + return True + return False + + def fake_execute(*args, **kwargs): + pass + + self.stubs.Set(os.path, 'exists', fake_exists) + self.stubs.Set(utils, 'execute', fake_execute) + + def test_same_fname_concurrency(self): + """Ensures that the same fname cache runs at a sequentially""" + conn = connection.LibvirtConnection + wait1 = eventlet.event.Event() + done1 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname', False, wait1, done1) + wait2 = eventlet.event.Event() + done2 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname', False, wait2, done2) + wait2.send() + eventlet.sleep(0) + try: + self.assertFalse(done2.ready()) + finally: + wait1.send() + done1.wait() + eventlet.sleep(0) + self.assertTrue(done2.ready()) + + def test_different_fname_concurrency(self): + """Ensures that two different fname caches are concurrent""" + conn = connection.LibvirtConnection + wait1 = eventlet.event.Event() + done1 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname2', False, wait1, done1) + wait2 = eventlet.event.Event() + done2 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname1', False, wait2, done2) + wait2.send() + eventlet.sleep(0) + try: + self.assertTrue(done2.ready()) + finally: + wait1.send() + eventlet.sleep(0) + + +class 
LibvirtConnTestCase(test.TestCase): + def setUp(self): + super(LibvirtConnTestCase, self).setUp() + connection._late_load_cheetah() + self.flags(fake_call=True) + self.manager = manager.AuthManager() + + try: + pjs = self.manager.get_projects() + pjs = [p for p in pjs if p.name == 'fake'] + if 0 != len(pjs): + self.manager.delete_project(pjs[0]) + + users = self.manager.get_users() + users = [u for u in users if u.name == 'fake'] + if 0 != len(users): + self.manager.delete_user(users[0]) + except Exception, e: + pass + + users = self.manager.get_users() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.network = utils.import_object(FLAGS.network_manager) + self.context = context.get_admin_context() + FLAGS.instances_path = '' + self.call_libvirt_dependant_setup = False + + test_ip = '10.11.12.13' + test_instance = {'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'instance_type_id': '5'} # m1.small + + def lazy_load_library_exists(self): + """check if libvirt is available.""" + # try to connect libvirt. if fail, skip test. + try: + import libvirt + import libxml2 + except ImportError: + return False + global libvirt + libvirt = __import__('libvirt') + connection.libvirt = __import__('libvirt') + connection.libxml2 = __import__('libxml2') + return True + + def create_fake_libvirt_mock(self, **kwargs): + """Defining mocks for LibvirtConnection(libvirt is not used).""" + + # A fake libvirt.virConnect + class FakeLibvirtConnection(object): + pass + + # A fake connection.IptablesFirewallDriver + class FakeIptablesFirewallDriver(object): + + def __init__(self, **kwargs): + pass + + def setattr(self, key, val): + self.__setattr__(key, val) + + # Creating mocks + fake = FakeLibvirtConnection() + fakeip = FakeIptablesFirewallDriver + # Customizing above fake if necessary + for key, val in kwargs.items(): + fake.__setattr__(key, val) + + # Inevitable mocks for connection.LibvirtConnection + self.mox.StubOutWithMock(connection.utils, 'import_class') + connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn = fake + + def create_service(self, **kwargs): + service_ref = {'host': kwargs.get('host', 'dummy'), + 'binary': 'nova-compute', + 'topic': 'compute', + 'report_count': 0, + 'availability_zone': 'zone'} + + return db.service_create(context.get_admin_context(), service_ref) + + def test_xml_and_uri_no_ramdisk_no_kernel(self): + instance_data = dict(self.test_instance) + self._check_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri_no_ramdisk(self): + instance_data = dict(self.test_instance) + instance_data['kernel_id'] = 'aki-deadbeef' + self._check_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=False) + + def test_xml_and_uri_no_kernel(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + self._check_xml_and_uri(instance_data, + expect_kernel=False, expect_ramdisk=False) + + def test_xml_and_uri(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self._check_xml_and_uri(instance_data, + expect_kernel=True, expect_ramdisk=True) + + def 
test_xml_and_uri_rescue(self): + instance_data = dict(self.test_instance) + instance_data['ramdisk_id'] = 'ari-deadbeef' + instance_data['kernel_id'] = 'aki-deadbeef' + self._check_xml_and_uri(instance_data, expect_kernel=True, + expect_ramdisk=True, rescue=True) + + def test_lxc_container_and_uri(self): + instance_data = dict(self.test_instance) + self._check_xml_and_container(instance_data) + + def _check_xml_and_container(self, instance): + user_context = context.RequestContext(project=self.project, + user=self.user) + instance_ref = db.instance_create(user_context, instance) + host = self.network.get_network_host(user_context.elevated()) + network_ref = db.project_get_network(context.get_admin_context(), + self.project.id) + + fixed_ip = {'address': self.test_ip, + 'network_id': network_ref['id']} + + ctxt = context.get_admin_context() + fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) + db.fixed_ip_update(ctxt, self.test_ip, + {'allocated': True, + 'instance_id': instance_ref['id']}) + + self.flags(libvirt_type='lxc') + conn = connection.LibvirtConnection(True) + + uri = conn.get_uri() + self.assertEquals(uri, 'lxc:///') + + xml = conn.to_xml(instance_ref) + tree = xml_to_tree(xml) + + check = [ + (lambda t: t.find('.').get('type'), 'lxc'), + (lambda t: t.find('./os/type').text, 'exe'), + (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')] + + for i, (check, expected_result) in enumerate(check): + self.assertEqual(check(tree), + expected_result, + '%s failed common check %d' % (xml, i)) + + target = tree.find('./devices/filesystem/source').get('dir') + self.assertTrue(len(target) > 0) + + def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel, + rescue=False): + user_context = context.RequestContext(project=self.project, + user=self.user) + instance_ref = db.instance_create(user_context, instance) + host = self.network.get_network_host(user_context.elevated()) + network_ref = db.project_get_network(context.get_admin_context(), + self.project.id) + + fixed_ip = {'address': self.test_ip, + 'network_id': network_ref['id']} + + ctxt = context.get_admin_context() + fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) + db.fixed_ip_update(ctxt, self.test_ip, + {'allocated': True, + 'instance_id': instance_ref['id']}) + + type_uri_map = {'qemu': ('qemu:///system', + [(lambda t: t.find('.').get('type'), 'qemu'), + (lambda t: t.find('./os/type').text, 'hvm'), + (lambda t: t.find('./devices/emulator'), None)]), + 'kvm': ('qemu:///system', + [(lambda t: t.find('.').get('type'), 'kvm'), + (lambda t: t.find('./os/type').text, 'hvm'), + (lambda t: t.find('./devices/emulator'), None)]), + 'uml': ('uml:///system', + [(lambda t: t.find('.').get('type'), 'uml'), + (lambda t: t.find('./os/type').text, 'uml')]), + 'xen': ('xen:///', + [(lambda t: t.find('.').get('type'), 'xen'), + (lambda t: t.find('./os/type').text, 'linux')]), + } + + for hypervisor_type in ['qemu', 'kvm', 'xen']: + check_list = type_uri_map[hypervisor_type][1] + + if rescue: + check = (lambda t: t.find('./os/kernel').text.split('/')[1], + 'kernel.rescue') + check_list.append(check) + check = (lambda t: t.find('./os/initrd').text.split('/')[1], + 'ramdisk.rescue') + check_list.append(check) + else: + if expect_kernel: + check = (lambda t: t.find('./os/kernel').text.split( + '/')[1], 'kernel') + else: + check = (lambda t: t.find('./os/kernel'), None) + check_list.append(check) + + if expect_ramdisk: + check = (lambda t: t.find('./os/initrd').text.split( + '/')[1], 'ramdisk') + else: + check = (lambda t: 
t.find('./os/initrd'), None) + check_list.append(check) + + common_checks = [ + (lambda t: t.find('.').tag, 'domain'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get('name'), 'IP'), + (lambda t: t.find( + './devices/interface/filterref/parameter').get( + 'value'), '10.11.12.13'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'name'), 'DHCPSERVER'), + (lambda t: t.findall( + './devices/interface/filterref/parameter')[1].get( + 'value'), '10.0.0.1'), + (lambda t: t.find('./devices/serial/source').get( + 'path').split('/')[1], 'console.log'), + (lambda t: t.find('./memory').text, '2097152')] + if rescue: + common_checks += [ + (lambda t: t.findall('./devices/disk/source')[0].get( + 'file').split('/')[1], 'disk.rescue'), + (lambda t: t.findall('./devices/disk/source')[1].get( + 'file').split('/')[1], 'disk')] + else: + common_checks += [(lambda t: t.findall( + './devices/disk/source')[0].get('file').split('/')[1], + 'disk')] + common_checks += [(lambda t: t.findall( + './devices/disk/source')[1].get('file').split('/')[1], + 'disk.local')] + + for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): + FLAGS.libvirt_type = libvirt_type + conn = connection.LibvirtConnection(True) + + uri = conn.get_uri() + self.assertEquals(uri, expected_uri) + + xml = conn.to_xml(instance_ref, rescue) + tree = xml_to_tree(xml) + for i, (check, expected_result) in enumerate(checks): + self.assertEqual(check(tree), + expected_result, + '%s failed check %d' % (xml, i)) + + for i, (check, expected_result) in enumerate(common_checks): + self.assertEqual(check(tree), + expected_result, + '%s failed common check %d' % (xml, i)) + + # This test is supposed to make sure we don't + # override a specifically set uri + # + # Deliberately not just assigning this string to FLAGS.libvirt_uri and + # checking against that later on. This way we make sure the + # implementation doesn't fiddle around with the FLAGS. + testuri = 'something completely different' + FLAGS.libvirt_uri = testuri + for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): + FLAGS.libvirt_type = libvirt_type + conn = connection.LibvirtConnection(True) + uri = conn.get_uri() + self.assertEquals(uri, testuri) + db.instance_destroy(user_context, instance_ref['id']) + + def test_update_available_resource_works_correctly(self): + """Confirm compute_node table is updated successfully.""" + org_path = FLAGS.instances_path = '' + FLAGS.instances_path = '.' 
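        # (Editor's illustrative aside, not part of the original patch.)
        # The XML assertions above pair an ElementTree accessor with its
        # expected value.  A self-contained sketch of the same pattern,
        # using a made-up domain document:
        from xml.etree.ElementTree import fromstring as _example_to_tree
        _example_tree = _example_to_tree(
            "<domain type='kvm'><os><type>hvm</type></os></domain>")
        _example_checks = [
            (lambda t: t.find('.').get('type'), 'kvm'),
            (lambda t: t.find('./os/type').text, 'hvm')]
        for _i, (_check, _expected) in enumerate(_example_checks):
            assert _check(_example_tree) == _expected, \
                'example check %d failed' % _i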
+ + # Prepare mocks + def getVersion(): + return 12003 + + def getType(): + return 'qemu' + + def listDomainsID(): + return [] + + service_ref = self.create_service(host='dummy') + self.create_fake_libvirt_mock(getVersion=getVersion, + getType=getType, + listDomainsID=listDomainsID) + self.mox.StubOutWithMock(connection.LibvirtConnection, + 'get_cpu_info') + connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') + + # Start test + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + conn.update_available_resource(self.context, 'dummy') + service_ref = db.service_get(self.context, service_ref['id']) + compute_node = service_ref['compute_node'][0] + + if sys.platform.upper() == 'LINUX2': + self.assertTrue(compute_node['vcpus'] >= 0) + self.assertTrue(compute_node['memory_mb'] > 0) + self.assertTrue(compute_node['local_gb'] > 0) + self.assertTrue(compute_node['vcpus_used'] == 0) + self.assertTrue(compute_node['memory_mb_used'] > 0) + self.assertTrue(compute_node['local_gb_used'] > 0) + self.assertTrue(len(compute_node['hypervisor_type']) > 0) + self.assertTrue(compute_node['hypervisor_version'] > 0) + else: + self.assertTrue(compute_node['vcpus'] >= 0) + self.assertTrue(compute_node['memory_mb'] == 0) + self.assertTrue(compute_node['local_gb'] > 0) + self.assertTrue(compute_node['vcpus_used'] == 0) + self.assertTrue(compute_node['memory_mb_used'] == 0) + self.assertTrue(compute_node['local_gb_used'] > 0) + self.assertTrue(len(compute_node['hypervisor_type']) > 0) + self.assertTrue(compute_node['hypervisor_version'] > 0) + + db.service_destroy(self.context, service_ref['id']) + FLAGS.instances_path = org_path + + def test_update_resource_info_no_compute_record_found(self): + """Raise exception if no recorde found on services table.""" + org_path = FLAGS.instances_path = '' + FLAGS.instances_path = '.' 
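        # (Editor's illustrative aside, not part of the original patch.)
        # These tests lean on the mox record/replay idiom; reduced to a
        # standalone sketch with a made-up collaborator class:
        #
        #     import mox
        #
        #     class Hypervisor(object):
        #         def version(self):
        #             return 12003
        #
        #     m = mox.Mox()
        #     hv = Hypervisor()
        #     m.StubOutWithMock(hv, 'version')
        #     hv.version().AndReturn(9999)   # record the expected call
        #     m.ReplayAll()
        #     assert hv.version() == 9999    # replay: the stub answers
        #     m.VerifyAll()
        #     m.UnsetStubs()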
+ self.create_fake_libvirt_mock() + + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + self.assertRaises(exception.ComputeServiceUnavailable, + conn.update_available_resource, + self.context, 'dummy') + + FLAGS.instances_path = org_path + + def test_ensure_filtering_rules_for_instance_timeout(self): + """ensure_filtering_fules_for_instance() finishes with timeout.""" + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Preparing mocks + def fake_none(self): + return + + def fake_raise(self): + raise libvirt.libvirtError('ERR') + + class FakeTime(object): + def __init__(self): + self.counter = 0 + + def sleep(self, t): + self.counter += t + + fake_timer = FakeTime() + + self.create_fake_libvirt_mock() + instance_ref = db.instance_create(self.context, self.test_instance) + + # Start test + self.mox.ReplayAll() + try: + conn = connection.LibvirtConnection(False) + conn.firewall_driver.setattr('setup_basic_filtering', fake_none) + conn.firewall_driver.setattr('prepare_instance_filter', fake_none) + conn.firewall_driver.setattr('instance_filter_exists', fake_none) + conn.ensure_filtering_rules_for_instance(instance_ref, + time=fake_timer) + except exception.Error, e: + c1 = (0 <= e.message.find('Timeout migrating for')) + self.assertTrue(c1) + + self.assertEqual(29, fake_timer.counter, "Didn't wait the expected " + "amount of time") + + db.instance_destroy(self.context, instance_ref['id']) + + def test_live_migration_raises_exception(self): + """Confirms recover method is called when exceptions are raised.""" + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Preparing data + self.compute = utils.import_object(FLAGS.compute_manager) + instance_dict = {'host': 'fake', 'state': power_state.RUNNING, + 'state_description': 'running'} + instance_ref = db.instance_create(self.context, self.test_instance) + instance_ref = db.instance_update(self.context, instance_ref['id'], + instance_dict) + vol_dict = {'status': 'migrating', 'size': 1} + volume_ref = db.volume_create(self.context, vol_dict) + db.volume_attached(self.context, volume_ref['id'], instance_ref['id'], + '/dev/fake') + + # Preparing mocks + vdmock = self.mox.CreateMock(libvirt.virDomain) + self.mox.StubOutWithMock(vdmock, "migrateToURI") + vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest', + mox.IgnoreArg(), + None, FLAGS.live_migration_bandwidth).\ + AndRaise(libvirt.libvirtError('ERR')) + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return vdmock + + self.create_fake_libvirt_mock(lookupByName=fake_lookup) + + # Start test + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + self.assertRaises(libvirt.libvirtError, + conn._live_migration, + self.context, instance_ref, 'dest', '', + self.compute.recover_live_migration) + + instance_ref = db.instance_get(self.context, instance_ref['id']) + self.assertTrue(instance_ref['state_description'] == 'running') + self.assertTrue(instance_ref['state'] == power_state.RUNNING) + volume_ref = db.volume_get(self.context, volume_ref['id']) + self.assertTrue(volume_ref['status'] == 'in-use') + + db.volume_destroy(self.context, volume_ref['id']) + db.instance_destroy(self.context, instance_ref['id']) + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(LibvirtConnTestCase, self).tearDown() + + +class IptablesFirewallTestCase(test.TestCase): + def setUp(self): + super(IptablesFirewallTestCase, 
self).setUp() + + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext('fake', 'fake') + self.network = utils.import_object(FLAGS.network_manager) + + class FakeLibvirtConnection(object): + pass + self.fake_libvirt_connection = FakeLibvirtConnection() + self.fw = firewall.IptablesFirewallDriver( + get_connection=lambda: self.fake_libvirt_connection) + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(IptablesFirewallTestCase, self).tearDown() + + in_nat_rules = [ + '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', + '*nat', + ':PREROUTING ACCEPT [1170:189210]', + ':INPUT ACCEPT [844:71028]', + ':OUTPUT ACCEPT [5149:405186]', + ':POSTROUTING ACCEPT [5063:386098]', + ] + + in_filter_rules = [ + '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', + '*filter', + ':INPUT ACCEPT [969615:281627771]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [915599:63811649]', + ':nova-block-ipv4 - [0:0]', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' + ',ESTABLISHED -j ACCEPT ', + '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ', + '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ', + 'COMMIT', + '# Completed on Mon Dec 6 11:54:13 2010', + ] + + in6_filter_rules = [ + '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011', + '*filter', + ':INPUT ACCEPT [349155:75810423]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [349256:75777230]', + 'COMMIT', + '# Completed on Tue Jan 18 23:47:56 2011', + ] + + def test_static_filters(self): + instance_ref = db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake', + 'mac_address': '56:12:12:12:12:12', + 'instance_type_id': 1}) + ip = '10.11.12.13' + + network_ref = db.project_get_network(self.context, + 'fake') + + fixed_ip = {'address': ip, + 'network_id': network_ref['id']} + + admin_ctxt = context.get_admin_context() + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + + secgroup = db.security_group_create(admin_ctxt, + {'user_id': 'fake', + 'project_id': 'fake', + 'name': 'testgroup', + 'description': 'test group'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'icmp', + 'from_port': -1, + 'to_port': -1, + 'cidr': '192.168.11.0/24'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'icmp', + 'from_port': 8, + 'to_port': -1, + 'cidr': '192.168.11.0/24'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'tcp', + 'from_port': 80, + 'to_port': 81, + 'cidr': '192.168.10.0/24'}) + + db.instance_add_security_group(admin_ctxt, instance_ref['id'], + secgroup['id']) + instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) + +# self.fw.add_instance(instance_ref) + def fake_iptables_execute(*cmd, **kwargs): + process_input = kwargs.get('process_input', None) + if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'): + return '\n'.join(self.in6_filter_rules), None + if cmd == ('sudo', 'iptables-save', '-t', 'filter'): + 
return '\n'.join(self.in_filter_rules), None + if cmd == ('sudo', 'iptables-save', '-t', 'nat'): + return '\n'.join(self.in_nat_rules), None + if cmd == ('sudo', 'iptables-restore'): + lines = process_input.split('\n') + if '*filter' in lines: + self.out_rules = lines + return '', '' + if cmd == ('sudo', 'ip6tables-restore'): + lines = process_input.split('\n') + if '*filter' in lines: + self.out6_rules = lines + return '', '' + print cmd, kwargs + + from nova.network import linux_net + linux_net.iptables_manager.execute = fake_iptables_execute + + self.fw.prepare_instance_filter(instance_ref) + self.fw.apply_instance_filter(instance_ref) + + in_rules = filter(lambda l: not l.startswith('#'), + self.in_filter_rules) + for rule in in_rules: + if not 'nova' in rule: + self.assertTrue(rule in self.out_rules, + 'Rule went missing: %s' % rule) + + instance_chain = None + for rule in self.out_rules: + # This is pretty crude, but it'll do for now + if '-d 10.11.12.13 -j' in rule: + instance_chain = rule.split(' ')[-1] + break + self.assertTrue(instance_chain, "The instance chain wasn't added") + + security_group_chain = None + for rule in self.out_rules: + # This is pretty crude, but it'll do for now + if '-A %s -j' % instance_chain in rule: + security_group_chain = rule.split(' ')[-1] + break + self.assertTrue(security_group_chain, + "The security group chain wasn't added") + + regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT') + self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, + "ICMP acceptance rule wasn't added") + + regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp ' + '--icmp-type 8 -j ACCEPT') + self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, + "ICMP Echo Request acceptance rule wasn't added") + + regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport ' + '--dports 80:81 -j ACCEPT') + self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, + "TCP port 80/81 acceptance rule wasn't added") + db.instance_destroy(admin_ctxt, instance_ref['id']) + + +class NWFilterTestCase(test.TestCase): + def setUp(self): + super(NWFilterTestCase, self).setUp() + + class Mock(object): + pass + + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext(self.user, self.project) + + self.fake_libvirt_connection = Mock() + + self.fw = firewall.NWFilterFirewall( + lambda: self.fake_libvirt_connection) + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(NWFilterTestCase, self).tearDown() + + def test_cidr_rule_nwfilter_xml(self): + cloud_controller = cloud.CloudController() + cloud_controller.create_security_group(self.context, + 'testgroup', + 'test group description') + cloud_controller.authorize_security_group_ingress(self.context, + 'testgroup', + from_port='80', + to_port='81', + ip_protocol='tcp', + cidr_ip='0.0.0.0/0') + + security_group = db.security_group_get_by_name(self.context, + 'fake', + 'testgroup') + + xml = self.fw.security_group_to_nwfilter_xml(security_group.id) + + dom = xml_to_dom(xml) + self.assertEqual(dom.firstChild.tagName, 'filter') + + rules = dom.getElementsByTagName('rule') + self.assertEqual(len(rules), 1) + + # It's supposed to allow inbound traffic. 
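        # (Editor's illustrative aside, not part of the original patch.)
        # For the 80-81/tcp 0.0.0.0/0 rule authorized above, the generated
        # nwfilter rule element looks roughly like the snippet below; the
        # priority is only asserted to be below 1000, so the exact number
        # shown here is an assumption:
        #
        #   <rule action='accept' direction='in' priority='300'>
        #     <tcp srcipaddr='0.0.0.0' srcipmask='0.0.0.0'
        #          dstportstart='80' dstportend='81' />
        #   </rule>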
+ self.assertEqual(rules[0].getAttribute('action'), 'accept') + self.assertEqual(rules[0].getAttribute('direction'), 'in') + + # Must be lower priority than the base filter (which blocks everything) + self.assertTrue(int(rules[0].getAttribute('priority')) < 1000) + + ip_conditions = rules[0].getElementsByTagName('tcp') + self.assertEqual(len(ip_conditions), 1) + self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0') + self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0') + self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80') + self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81') + self.teardown_security_group() + + def teardown_security_group(self): + cloud_controller = cloud.CloudController() + cloud_controller.delete_security_group(self.context, 'testgroup') + + def setup_and_return_security_group(self): + cloud_controller = cloud.CloudController() + cloud_controller.create_security_group(self.context, + 'testgroup', + 'test group description') + cloud_controller.authorize_security_group_ingress(self.context, + 'testgroup', + from_port='80', + to_port='81', + ip_protocol='tcp', + cidr_ip='0.0.0.0/0') + + return db.security_group_get_by_name(self.context, 'fake', 'testgroup') + + def test_creates_base_rule_first(self): + # These come pre-defined by libvirt + self.defined_filters = ['no-mac-spoofing', + 'no-ip-spoofing', + 'no-arp-spoofing', + 'allow-dhcp-server'] + + self.recursive_depends = {} + for f in self.defined_filters: + self.recursive_depends[f] = [] + + def _filterDefineXMLMock(xml): + dom = xml_to_dom(xml) + name = dom.firstChild.getAttribute('name') + self.recursive_depends[name] = [] + for f in dom.getElementsByTagName('filterref'): + ref = f.getAttribute('filter') + self.assertTrue(ref in self.defined_filters, + ('%s referenced filter that does ' + + 'not yet exist: %s') % (name, ref)) + dependencies = [ref] + self.recursive_depends[ref] + self.recursive_depends[name] += dependencies + + self.defined_filters.append(name) + return True + + self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock + + instance_ref = db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake', + 'mac_address': '00:A0:C9:14:C8:29', + 'instance_type_id': 1}) + inst_id = instance_ref['id'] + + ip = '10.11.12.13' + + network_ref = db.project_get_network(self.context, + 'fake') + + fixed_ip = {'address': ip, + 'network_id': network_ref['id']} + + admin_ctxt = context.get_admin_context() + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + + def _ensure_all_called(): + instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], + '00A0C914C829') + secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] + for required in [secgroup_filter, 'allow-dhcp-server', + 'no-arp-spoofing', 'no-ip-spoofing', + 'no-mac-spoofing']: + self.assertTrue(required in + self.recursive_depends[instance_filter], + "Instance's filter does not include %s" % + required) + + self.security_group = self.setup_and_return_security_group() + + db.instance_add_security_group(self.context, inst_id, + self.security_group.id) + instance = db.instance_get(self.context, inst_id) + + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) + _ensure_all_called() + self.teardown_security_group() + db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git 
a/nova/tests/test_virt.py b/nova/tests/test_virt.py deleted file mode 100644 index fd284c52b..000000000 --- a/nova/tests/test_virt.py +++ /dev/null @@ -1,886 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2010 OpenStack LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -import mox -import os -import re -import sys - -from xml.etree.ElementTree import fromstring as xml_to_tree -from xml.dom.minidom import parseString as xml_to_dom - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import test -from nova import utils -from nova.api.ec2 import cloud -from nova.auth import manager -from nova.compute import manager as compute_manager -from nova.compute import power_state -from nova.db.sqlalchemy import models -from nova.virt.libvirt import connection -from nova.virt.libvirt import firewall - -libvirt = None -FLAGS = flags.FLAGS -flags.DECLARE('instances_path', 'nova.compute.manager') - - -def _concurrency(wait, done, target): - wait.wait() - done.send() - - -class CacheConcurrencyTestCase(test.TestCase): - def setUp(self): - super(CacheConcurrencyTestCase, self).setUp() - - def fake_exists(fname): - basedir = os.path.join(FLAGS.instances_path, '_base') - if fname == basedir: - return True - return False - - def fake_execute(*args, **kwargs): - pass - - self.stubs.Set(os.path, 'exists', fake_exists) - self.stubs.Set(utils, 'execute', fake_execute) - - def test_same_fname_concurrency(self): - """Ensures that the same fname cache runs at a sequentially""" - conn = connection.LibvirtConnection - wait1 = eventlet.event.Event() - done1 = eventlet.event.Event() - eventlet.spawn(conn._cache_image, _concurrency, - 'target', 'fname', False, wait1, done1) - wait2 = eventlet.event.Event() - done2 = eventlet.event.Event() - eventlet.spawn(conn._cache_image, _concurrency, - 'target', 'fname', False, wait2, done2) - wait2.send() - eventlet.sleep(0) - try: - self.assertFalse(done2.ready()) - finally: - wait1.send() - done1.wait() - eventlet.sleep(0) - self.assertTrue(done2.ready()) - - def test_different_fname_concurrency(self): - """Ensures that two different fname caches are concurrent""" - conn = connection.LibvirtConnection - wait1 = eventlet.event.Event() - done1 = eventlet.event.Event() - eventlet.spawn(conn._cache_image, _concurrency, - 'target', 'fname2', False, wait1, done1) - wait2 = eventlet.event.Event() - done2 = eventlet.event.Event() - eventlet.spawn(conn._cache_image, _concurrency, - 'target', 'fname1', False, wait2, done2) - wait2.send() - eventlet.sleep(0) - try: - self.assertTrue(done2.ready()) - finally: - wait1.send() - eventlet.sleep(0) - - -class LibvirtConnTestCase(test.TestCase): - def setUp(self): - super(LibvirtConnTestCase, self).setUp() - connection._late_load_cheetah() - self.flags(fake_call=True) - self.manager = manager.AuthManager() - - try: - pjs = self.manager.get_projects() - pjs = [p for p in pjs if p.name == 'fake'] - if 0 != len(pjs): - self.manager.delete_project(pjs[0]) - 
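The CacheConcurrencyTestCase being removed above drives its greenthreads with eventlet events: a worker blocks on one event and signals a second when it finishes, which lets the test observe whether two _cache_image calls serialize. A tiny standalone sketch of that primitive (the worker here is illustrative and does no caching):

    import eventlet

    def worker(wait_event, done_event):
        wait_event.wait()   # park until the test releases it
        done_event.send()   # then report completion

    wait1 = eventlet.event.Event()
    done1 = eventlet.event.Event()
    eventlet.spawn(worker, wait1, done1)

    assert not done1.ready()  # worker is still parked on wait1
    wait1.send()              # release it
    done1.wait()              # yield so the worker can run and finish
    assert done1.ready()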
- users = self.manager.get_users() - users = [u for u in users if u.name == 'fake'] - if 0 != len(users): - self.manager.delete_user(users[0]) - except Exception, e: - pass - - users = self.manager.get_users() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.network = utils.import_object(FLAGS.network_manager) - self.context = context.get_admin_context() - FLAGS.instances_path = '' - self.call_libvirt_dependant_setup = False - - test_ip = '10.11.12.13' - test_instance = {'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type_id': '5'} # m1.small - - def lazy_load_library_exists(self): - """check if libvirt is available.""" - # try to connect libvirt. if fail, skip test. - try: - import libvirt - import libxml2 - except ImportError: - return False - global libvirt - libvirt = __import__('libvirt') - connection.libvirt = __import__('libvirt') - connection.libxml2 = __import__('libxml2') - return True - - def create_fake_libvirt_mock(self, **kwargs): - """Defining mocks for LibvirtConnection(libvirt is not used).""" - - # A fake libvirt.virConnect - class FakeLibvirtConnection(object): - pass - - # A fake connection.IptablesFirewallDriver - class FakeIptablesFirewallDriver(object): - - def __init__(self, **kwargs): - pass - - def setattr(self, key, val): - self.__setattr__(key, val) - - # Creating mocks - fake = FakeLibvirtConnection() - fakeip = FakeIptablesFirewallDriver - # Customizing above fake if necessary - for key, val in kwargs.items(): - fake.__setattr__(key, val) - - # Inevitable mocks for connection.LibvirtConnection - self.mox.StubOutWithMock(connection.utils, 'import_class') - connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) - self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') - connection.LibvirtConnection._conn = fake - - def create_service(self, **kwargs): - service_ref = {'host': kwargs.get('host', 'dummy'), - 'binary': 'nova-compute', - 'topic': 'compute', - 'report_count': 0, - 'availability_zone': 'zone'} - - return db.service_create(context.get_admin_context(), service_ref) - - def test_xml_and_uri_no_ramdisk_no_kernel(self): - instance_data = dict(self.test_instance) - self._check_xml_and_uri(instance_data, - expect_kernel=False, expect_ramdisk=False) - - def test_xml_and_uri_no_ramdisk(self): - instance_data = dict(self.test_instance) - instance_data['kernel_id'] = 'aki-deadbeef' - self._check_xml_and_uri(instance_data, - expect_kernel=True, expect_ramdisk=False) - - def test_xml_and_uri_no_kernel(self): - instance_data = dict(self.test_instance) - instance_data['ramdisk_id'] = 'ari-deadbeef' - self._check_xml_and_uri(instance_data, - expect_kernel=False, expect_ramdisk=False) - - def test_xml_and_uri(self): - instance_data = dict(self.test_instance) - instance_data['ramdisk_id'] = 'ari-deadbeef' - instance_data['kernel_id'] = 'aki-deadbeef' - self._check_xml_and_uri(instance_data, - expect_kernel=True, expect_ramdisk=True) - - def test_xml_and_uri_rescue(self): - instance_data = dict(self.test_instance) - instance_data['ramdisk_id'] = 'ari-deadbeef' - instance_data['kernel_id'] = 'aki-deadbeef' - self._check_xml_and_uri(instance_data, expect_kernel=True, - expect_ramdisk=True, rescue=True) - - def test_lxc_container_and_uri(self): - instance_data = dict(self.test_instance) - 
self._check_xml_and_container(instance_data) - - def _check_xml_and_container(self, instance): - user_context = context.RequestContext(project=self.project, - user=self.user) - instance_ref = db.instance_create(user_context, instance) - host = self.network.get_network_host(user_context.elevated()) - network_ref = db.project_get_network(context.get_admin_context(), - self.project.id) - - fixed_ip = {'address': self.test_ip, - 'network_id': network_ref['id']} - - ctxt = context.get_admin_context() - fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, self.test_ip, - {'allocated': True, - 'instance_id': instance_ref['id']}) - - self.flags(libvirt_type='lxc') - conn = connection.LibvirtConnection(True) - - uri = conn.get_uri() - self.assertEquals(uri, 'lxc:///') - - xml = conn.to_xml(instance_ref) - tree = xml_to_tree(xml) - - check = [ - (lambda t: t.find('.').get('type'), 'lxc'), - (lambda t: t.find('./os/type').text, 'exe'), - (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')] - - for i, (check, expected_result) in enumerate(check): - self.assertEqual(check(tree), - expected_result, - '%s failed common check %d' % (xml, i)) - - target = tree.find('./devices/filesystem/source').get('dir') - self.assertTrue(len(target) > 0) - - def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel, - rescue=False): - user_context = context.RequestContext(project=self.project, - user=self.user) - instance_ref = db.instance_create(user_context, instance) - host = self.network.get_network_host(user_context.elevated()) - network_ref = db.project_get_network(context.get_admin_context(), - self.project.id) - - fixed_ip = {'address': self.test_ip, - 'network_id': network_ref['id']} - - ctxt = context.get_admin_context() - fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, self.test_ip, - {'allocated': True, - 'instance_id': instance_ref['id']}) - - type_uri_map = {'qemu': ('qemu:///system', - [(lambda t: t.find('.').get('type'), 'qemu'), - (lambda t: t.find('./os/type').text, 'hvm'), - (lambda t: t.find('./devices/emulator'), None)]), - 'kvm': ('qemu:///system', - [(lambda t: t.find('.').get('type'), 'kvm'), - (lambda t: t.find('./os/type').text, 'hvm'), - (lambda t: t.find('./devices/emulator'), None)]), - 'uml': ('uml:///system', - [(lambda t: t.find('.').get('type'), 'uml'), - (lambda t: t.find('./os/type').text, 'uml')]), - 'xen': ('xen:///', - [(lambda t: t.find('.').get('type'), 'xen'), - (lambda t: t.find('./os/type').text, 'linux')]), - } - - for hypervisor_type in ['qemu', 'kvm', 'xen']: - check_list = type_uri_map[hypervisor_type][1] - - if rescue: - check = (lambda t: t.find('./os/kernel').text.split('/')[1], - 'kernel.rescue') - check_list.append(check) - check = (lambda t: t.find('./os/initrd').text.split('/')[1], - 'ramdisk.rescue') - check_list.append(check) - else: - if expect_kernel: - check = (lambda t: t.find('./os/kernel').text.split( - '/')[1], 'kernel') - else: - check = (lambda t: t.find('./os/kernel'), None) - check_list.append(check) - - if expect_ramdisk: - check = (lambda t: t.find('./os/initrd').text.split( - '/')[1], 'ramdisk') - else: - check = (lambda t: t.find('./os/initrd'), None) - check_list.append(check) - - common_checks = [ - (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find( - './devices/interface/filterref/parameter').get('name'), 'IP'), - (lambda t: t.find( - './devices/interface/filterref/parameter').get( - 'value'), '10.11.12.13'), - (lambda t: t.findall( - 
'./devices/interface/filterref/parameter')[1].get( - 'name'), 'DHCPSERVER'), - (lambda t: t.findall( - './devices/interface/filterref/parameter')[1].get( - 'value'), '10.0.0.1'), - (lambda t: t.find('./devices/serial/source').get( - 'path').split('/')[1], 'console.log'), - (lambda t: t.find('./memory').text, '2097152')] - if rescue: - common_checks += [ - (lambda t: t.findall('./devices/disk/source')[0].get( - 'file').split('/')[1], 'disk.rescue'), - (lambda t: t.findall('./devices/disk/source')[1].get( - 'file').split('/')[1], 'disk')] - else: - common_checks += [(lambda t: t.findall( - './devices/disk/source')[0].get('file').split('/')[1], - 'disk')] - common_checks += [(lambda t: t.findall( - './devices/disk/source')[1].get('file').split('/')[1], - 'disk.local')] - - for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): - FLAGS.libvirt_type = libvirt_type - conn = connection.LibvirtConnection(True) - - uri = conn.get_uri() - self.assertEquals(uri, expected_uri) - - xml = conn.to_xml(instance_ref, rescue) - tree = xml_to_tree(xml) - for i, (check, expected_result) in enumerate(checks): - self.assertEqual(check(tree), - expected_result, - '%s failed check %d' % (xml, i)) - - for i, (check, expected_result) in enumerate(common_checks): - self.assertEqual(check(tree), - expected_result, - '%s failed common check %d' % (xml, i)) - - # This test is supposed to make sure we don't - # override a specifically set uri - # - # Deliberately not just assigning this string to FLAGS.libvirt_uri and - # checking against that later on. This way we make sure the - # implementation doesn't fiddle around with the FLAGS. - testuri = 'something completely different' - FLAGS.libvirt_uri = testuri - for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): - FLAGS.libvirt_type = libvirt_type - conn = connection.LibvirtConnection(True) - uri = conn.get_uri() - self.assertEquals(uri, testuri) - db.instance_destroy(user_context, instance_ref['id']) - - def test_update_available_resource_works_correctly(self): - """Confirm compute_node table is updated successfully.""" - org_path = FLAGS.instances_path = '' - FLAGS.instances_path = '.' 
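_check_xml_and_uri and _check_xml_and_container above verify the generated domain XML with tables of (extractor, expected) pairs evaluated against an ElementTree, so one loop covers every assertion. A self-contained sketch of that check-table idiom, using only the standard library and a made-up snippet of domain XML:

    from xml.etree.ElementTree import fromstring as xml_to_tree

    sample = "<domain type='kvm'><os><type>hvm</type></os></domain>"
    tree = xml_to_tree(sample)

    checks = [
        (lambda t: t.find('.').get('type'), 'kvm'),
        (lambda t: t.find('./os/type').text, 'hvm'),
    ]
    for i, (extract, expected) in enumerate(checks):
        assert extract(tree) == expected, 'check %d failed' % i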
- - # Prepare mocks - def getVersion(): - return 12003 - - def getType(): - return 'qemu' - - def listDomainsID(): - return [] - - service_ref = self.create_service(host='dummy') - self.create_fake_libvirt_mock(getVersion=getVersion, - getType=getType, - listDomainsID=listDomainsID) - self.mox.StubOutWithMock(connection.LibvirtConnection, - 'get_cpu_info') - connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') - - # Start test - self.mox.ReplayAll() - conn = connection.LibvirtConnection(False) - conn.update_available_resource(self.context, 'dummy') - service_ref = db.service_get(self.context, service_ref['id']) - compute_node = service_ref['compute_node'][0] - - if sys.platform.upper() == 'LINUX2': - self.assertTrue(compute_node['vcpus'] >= 0) - self.assertTrue(compute_node['memory_mb'] > 0) - self.assertTrue(compute_node['local_gb'] > 0) - self.assertTrue(compute_node['vcpus_used'] == 0) - self.assertTrue(compute_node['memory_mb_used'] > 0) - self.assertTrue(compute_node['local_gb_used'] > 0) - self.assertTrue(len(compute_node['hypervisor_type']) > 0) - self.assertTrue(compute_node['hypervisor_version'] > 0) - else: - self.assertTrue(compute_node['vcpus'] >= 0) - self.assertTrue(compute_node['memory_mb'] == 0) - self.assertTrue(compute_node['local_gb'] > 0) - self.assertTrue(compute_node['vcpus_used'] == 0) - self.assertTrue(compute_node['memory_mb_used'] == 0) - self.assertTrue(compute_node['local_gb_used'] > 0) - self.assertTrue(len(compute_node['hypervisor_type']) > 0) - self.assertTrue(compute_node['hypervisor_version'] > 0) - - db.service_destroy(self.context, service_ref['id']) - FLAGS.instances_path = org_path - - def test_update_resource_info_no_compute_record_found(self): - """Raise exception if no recorde found on services table.""" - org_path = FLAGS.instances_path = '' - FLAGS.instances_path = '.' 
- self.create_fake_libvirt_mock() - - self.mox.ReplayAll() - conn = connection.LibvirtConnection(False) - self.assertRaises(exception.ComputeServiceUnavailable, - conn.update_available_resource, - self.context, 'dummy') - - FLAGS.instances_path = org_path - - def test_ensure_filtering_rules_for_instance_timeout(self): - """ensure_filtering_fules_for_instance() finishes with timeout.""" - # Skip if non-libvirt environment - if not self.lazy_load_library_exists(): - return - - # Preparing mocks - def fake_none(self): - return - - def fake_raise(self): - raise libvirt.libvirtError('ERR') - - class FakeTime(object): - def __init__(self): - self.counter = 0 - - def sleep(self, t): - self.counter += t - - fake_timer = FakeTime() - - self.create_fake_libvirt_mock() - instance_ref = db.instance_create(self.context, self.test_instance) - - # Start test - self.mox.ReplayAll() - try: - conn = connection.LibvirtConnection(False) - conn.firewall_driver.setattr('setup_basic_filtering', fake_none) - conn.firewall_driver.setattr('prepare_instance_filter', fake_none) - conn.firewall_driver.setattr('instance_filter_exists', fake_none) - conn.ensure_filtering_rules_for_instance(instance_ref, - time=fake_timer) - except exception.Error, e: - c1 = (0 <= e.message.find('Timeout migrating for')) - self.assertTrue(c1) - - self.assertEqual(29, fake_timer.counter, "Didn't wait the expected " - "amount of time") - - db.instance_destroy(self.context, instance_ref['id']) - - def test_live_migration_raises_exception(self): - """Confirms recover method is called when exceptions are raised.""" - # Skip if non-libvirt environment - if not self.lazy_load_library_exists(): - return - - # Preparing data - self.compute = utils.import_object(FLAGS.compute_manager) - instance_dict = {'host': 'fake', 'state': power_state.RUNNING, - 'state_description': 'running'} - instance_ref = db.instance_create(self.context, self.test_instance) - instance_ref = db.instance_update(self.context, instance_ref['id'], - instance_dict) - vol_dict = {'status': 'migrating', 'size': 1} - volume_ref = db.volume_create(self.context, vol_dict) - db.volume_attached(self.context, volume_ref['id'], instance_ref['id'], - '/dev/fake') - - # Preparing mocks - vdmock = self.mox.CreateMock(libvirt.virDomain) - self.mox.StubOutWithMock(vdmock, "migrateToURI") - vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest', - mox.IgnoreArg(), - None, FLAGS.live_migration_bandwidth).\ - AndRaise(libvirt.libvirtError('ERR')) - - def fake_lookup(instance_name): - if instance_name == instance_ref.name: - return vdmock - - self.create_fake_libvirt_mock(lookupByName=fake_lookup) - - # Start test - self.mox.ReplayAll() - conn = connection.LibvirtConnection(False) - self.assertRaises(libvirt.libvirtError, - conn._live_migration, - self.context, instance_ref, 'dest', '', - self.compute.recover_live_migration) - - instance_ref = db.instance_get(self.context, instance_ref['id']) - self.assertTrue(instance_ref['state_description'] == 'running') - self.assertTrue(instance_ref['state'] == power_state.RUNNING) - volume_ref = db.volume_get(self.context, volume_ref['id']) - self.assertTrue(volume_ref['status'] == 'in-use') - - db.volume_destroy(self.context, volume_ref['id']) - db.instance_destroy(self.context, instance_ref['id']) - - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(LibvirtConnTestCase, self).tearDown() - - -class IptablesFirewallTestCase(test.TestCase): - def setUp(self): - super(IptablesFirewallTestCase, 
self).setUp() - - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext('fake', 'fake') - self.network = utils.import_object(FLAGS.network_manager) - - class FakeLibvirtConnection(object): - pass - self.fake_libvirt_connection = FakeLibvirtConnection() - self.fw = firewall.IptablesFirewallDriver( - get_connection=lambda: self.fake_libvirt_connection) - - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(IptablesFirewallTestCase, self).tearDown() - - in_nat_rules = [ - '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', - '*nat', - ':PREROUTING ACCEPT [1170:189210]', - ':INPUT ACCEPT [844:71028]', - ':OUTPUT ACCEPT [5149:405186]', - ':POSTROUTING ACCEPT [5063:386098]', - ] - - in_filter_rules = [ - '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', - '*filter', - ':INPUT ACCEPT [969615:281627771]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [915599:63811649]', - ':nova-block-ipv4 - [0:0]', - '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', - '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' - ',ESTABLISHED -j ACCEPT ', - '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', - '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', - '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ', - '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ', - 'COMMIT', - '# Completed on Mon Dec 6 11:54:13 2010', - ] - - in6_filter_rules = [ - '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011', - '*filter', - ':INPUT ACCEPT [349155:75810423]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [349256:75777230]', - 'COMMIT', - '# Completed on Tue Jan 18 23:47:56 2011', - ] - - def test_static_filters(self): - instance_ref = db.instance_create(self.context, - {'user_id': 'fake', - 'project_id': 'fake', - 'mac_address': '56:12:12:12:12:12', - 'instance_type_id': 1}) - ip = '10.11.12.13' - - network_ref = db.project_get_network(self.context, - 'fake') - - fixed_ip = {'address': ip, - 'network_id': network_ref['id']} - - admin_ctxt = context.get_admin_context() - db.fixed_ip_create(admin_ctxt, fixed_ip) - db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) - - secgroup = db.security_group_create(admin_ctxt, - {'user_id': 'fake', - 'project_id': 'fake', - 'name': 'testgroup', - 'description': 'test group'}) - - db.security_group_rule_create(admin_ctxt, - {'parent_group_id': secgroup['id'], - 'protocol': 'icmp', - 'from_port': -1, - 'to_port': -1, - 'cidr': '192.168.11.0/24'}) - - db.security_group_rule_create(admin_ctxt, - {'parent_group_id': secgroup['id'], - 'protocol': 'icmp', - 'from_port': 8, - 'to_port': -1, - 'cidr': '192.168.11.0/24'}) - - db.security_group_rule_create(admin_ctxt, - {'parent_group_id': secgroup['id'], - 'protocol': 'tcp', - 'from_port': 80, - 'to_port': 81, - 'cidr': '192.168.10.0/24'}) - - db.instance_add_security_group(admin_ctxt, instance_ref['id'], - secgroup['id']) - instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) - -# self.fw.add_instance(instance_ref) - def fake_iptables_execute(*cmd, **kwargs): - process_input = kwargs.get('process_input', None) - if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'): - return '\n'.join(self.in6_filter_rules), None - if cmd == ('sudo', 'iptables-save', '-t', 'filter'): - 
return '\n'.join(self.in_filter_rules), None - if cmd == ('sudo', 'iptables-save', '-t', 'nat'): - return '\n'.join(self.in_nat_rules), None - if cmd == ('sudo', 'iptables-restore'): - lines = process_input.split('\n') - if '*filter' in lines: - self.out_rules = lines - return '', '' - if cmd == ('sudo', 'ip6tables-restore'): - lines = process_input.split('\n') - if '*filter' in lines: - self.out6_rules = lines - return '', '' - print cmd, kwargs - - from nova.network import linux_net - linux_net.iptables_manager.execute = fake_iptables_execute - - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) - - in_rules = filter(lambda l: not l.startswith('#'), - self.in_filter_rules) - for rule in in_rules: - if not 'nova' in rule: - self.assertTrue(rule in self.out_rules, - 'Rule went missing: %s' % rule) - - instance_chain = None - for rule in self.out_rules: - # This is pretty crude, but it'll do for now - if '-d 10.11.12.13 -j' in rule: - instance_chain = rule.split(' ')[-1] - break - self.assertTrue(instance_chain, "The instance chain wasn't added") - - security_group_chain = None - for rule in self.out_rules: - # This is pretty crude, but it'll do for now - if '-A %s -j' % instance_chain in rule: - security_group_chain = rule.split(' ')[-1] - break - self.assertTrue(security_group_chain, - "The security group chain wasn't added") - - regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT') - self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, - "ICMP acceptance rule wasn't added") - - regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp ' - '--icmp-type 8 -j ACCEPT') - self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, - "ICMP Echo Request acceptance rule wasn't added") - - regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport ' - '--dports 80:81 -j ACCEPT') - self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, - "TCP port 80/81 acceptance rule wasn't added") - db.instance_destroy(admin_ctxt, instance_ref['id']) - - -class NWFilterTestCase(test.TestCase): - def setUp(self): - super(NWFilterTestCase, self).setUp() - - class Mock(object): - pass - - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext(self.user, self.project) - - self.fake_libvirt_connection = Mock() - - self.fw = firewall.NWFilterFirewall( - lambda: self.fake_libvirt_connection) - - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(NWFilterTestCase, self).tearDown() - - def test_cidr_rule_nwfilter_xml(self): - cloud_controller = cloud.CloudController() - cloud_controller.create_security_group(self.context, - 'testgroup', - 'test group description') - cloud_controller.authorize_security_group_ingress(self.context, - 'testgroup', - from_port='80', - to_port='81', - ip_protocol='tcp', - cidr_ip='0.0.0.0/0') - - security_group = db.security_group_get_by_name(self.context, - 'fake', - 'testgroup') - - xml = self.fw.security_group_to_nwfilter_xml(security_group.id) - - dom = xml_to_dom(xml) - self.assertEqual(dom.firstChild.tagName, 'filter') - - rules = dom.getElementsByTagName('rule') - self.assertEqual(len(rules), 1) - - # It's supposed to allow inbound traffic. 
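test_static_filters in the file being removed above (and its new copy earlier in this patch) swaps linux_net.iptables_manager.execute for a fake that answers iptables-save with canned table dumps and records whatever would be piped to iptables-restore. A minimal standalone sketch of that capture pattern; the names (fake_execute, captured_rules) and the canned rules are illustrative:

    canned_filter = ['*filter', ':INPUT ACCEPT [0:0]', 'COMMIT']
    captured_rules = []

    def fake_execute(*cmd, **kwargs):
        process_input = kwargs.get('process_input', '')
        if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
            # Pretend this is the current state of the filter table.
            return '\n'.join(canned_filter), None
        if cmd == ('sudo', 'iptables-restore'):
            # Capture the rules the code under test tried to install.
            captured_rules.extend(process_input.split('\n'))
            return '', ''
        return '', ''

    # The driver under test would call fake_execute instead of real iptables;
    # assertions then run against captured_rules rather than the live firewall.
    fake_execute('sudo', 'iptables-restore',
                 process_input='*filter\n-A INPUT -j ACCEPT\nCOMMIT')
    assert '-A INPUT -j ACCEPT' in captured_rules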
- self.assertEqual(rules[0].getAttribute('action'), 'accept') - self.assertEqual(rules[0].getAttribute('direction'), 'in') - - # Must be lower priority than the base filter (which blocks everything) - self.assertTrue(int(rules[0].getAttribute('priority')) < 1000) - - ip_conditions = rules[0].getElementsByTagName('tcp') - self.assertEqual(len(ip_conditions), 1) - self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0') - self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0') - self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80') - self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81') - self.teardown_security_group() - - def teardown_security_group(self): - cloud_controller = cloud.CloudController() - cloud_controller.delete_security_group(self.context, 'testgroup') - - def setup_and_return_security_group(self): - cloud_controller = cloud.CloudController() - cloud_controller.create_security_group(self.context, - 'testgroup', - 'test group description') - cloud_controller.authorize_security_group_ingress(self.context, - 'testgroup', - from_port='80', - to_port='81', - ip_protocol='tcp', - cidr_ip='0.0.0.0/0') - - return db.security_group_get_by_name(self.context, 'fake', 'testgroup') - - def test_creates_base_rule_first(self): - # These come pre-defined by libvirt - self.defined_filters = ['no-mac-spoofing', - 'no-ip-spoofing', - 'no-arp-spoofing', - 'allow-dhcp-server'] - - self.recursive_depends = {} - for f in self.defined_filters: - self.recursive_depends[f] = [] - - def _filterDefineXMLMock(xml): - dom = xml_to_dom(xml) - name = dom.firstChild.getAttribute('name') - self.recursive_depends[name] = [] - for f in dom.getElementsByTagName('filterref'): - ref = f.getAttribute('filter') - self.assertTrue(ref in self.defined_filters, - ('%s referenced filter that does ' + - 'not yet exist: %s') % (name, ref)) - dependencies = [ref] + self.recursive_depends[ref] - self.recursive_depends[name] += dependencies - - self.defined_filters.append(name) - return True - - self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock - - instance_ref = db.instance_create(self.context, - {'user_id': 'fake', - 'project_id': 'fake', - 'mac_address': '00:A0:C9:14:C8:29', - 'instance_type_id': 1}) - inst_id = instance_ref['id'] - - ip = '10.11.12.13' - - network_ref = db.project_get_network(self.context, - 'fake') - - fixed_ip = {'address': ip, - 'network_id': network_ref['id']} - - admin_ctxt = context.get_admin_context() - db.fixed_ip_create(admin_ctxt, fixed_ip) - db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) - - def _ensure_all_called(): - instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], - '00A0C914C829') - secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] - for required in [secgroup_filter, 'allow-dhcp-server', - 'no-arp-spoofing', 'no-ip-spoofing', - 'no-mac-spoofing']: - self.assertTrue(required in - self.recursive_depends[instance_filter], - "Instance's filter does not include %s" % - required) - - self.security_group = self.setup_and_return_security_group() - - db.instance_add_security_group(self.context, inst_id, - self.security_group.id) - instance = db.instance_get(self.context, inst_id) - - self.fw.setup_basic_filtering(instance) - self.fw.prepare_instance_filter(instance) - self.fw.apply_instance_filter(instance) - _ensure_all_called() - self.teardown_security_group() - db.instance_destroy(admin_ctxt, instance_ref['id']) -- cgit From 
e171e7511c6b1a2baacf0ab9857643cc1fe82eb3 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Tue, 3 May 2011 17:08:04 -0700 Subject: Fix indentation. --- nova/virt/xenapi/volume_utils.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 819c48be5..55c11a4ad 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -246,19 +246,21 @@ def _get_target(volume_id): 1) volume_ref['host'] must resolve to something rather than loopback """ volume_ref = db.volume_get(context.get_admin_context(), - volume_id) + volume_id) result = (None, None) try: - (r, _e) = utils.execute('sudo', 'iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', volume_ref['host']) + (r, _e) = utils.execute('sudo', 'iscsiadm', + '-m', 'discovery', + '-t', 'sendtargets', + '-p', volume_ref['host']) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: volume_name = "volume-%08x" % volume_id for target in r.splitlines(): if FLAGS.iscsi_ip_prefix in target and volume_name in target: - (location, _sep, iscsi_name) = target.partition(" ") - break + (location, _sep, iscsi_name) = target.partition(" ") + break iscsi_portal = location.split(",")[0] result = (iscsi_name, iscsi_portal) return result -- cgit From 389f7c79199d5ad908a72375a7377a1122f36707 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 17:52:26 +0900 Subject: volume/driver: factor out lvm opration Factor out lvm operation for implementing basic snapshot later. --- nova/volume/driver.py | 62 ++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 28 deletions(-) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 3f3caf37a..9591c93d0 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -90,16 +90,40 @@ class VolumeDriver(object): raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) + def _create_volume(self, volume_name, sizestr): + self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', + volume_name, FLAGS.volume_group) + + def _copy_volume(self, srcstr, deststr, size_in_g): + self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M') + + def _volume_not_present(self, volume_name): + path_name = '%s/%s' % (FLAGS.volume_group, volume_name) + try: + self._try_execute('sudo', 'lvdisplay', path_name) + except Exception as e: + # If the volume isn't present + return True + return False + + def _delete_volume(self, volume, size_in_g): + """Deletes a logical volume.""" + # zero out old volumes to prevent data leaking between users + # TODO(ja): reclaiming space should be done lazy and low priority + self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) + self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % + (FLAGS.volume_group, volume['name'])) + + def _sizestr(self, size_in_g): + if int(size_in_g) == 0: + return '100M' + return '%sG' % size_in_g + def create_volume(self, volume): """Creates a logical volume. 
Can optionally return a Dictionary of changes to the volume object to be persisted.""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] - self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', - volume['name'], - FLAGS.volume_group) + self._create_volume(volume['name'], self._sizestr(volume['size'])) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" @@ -107,24 +131,10 @@ class VolumeDriver(object): def delete_volume(self, volume): """Deletes a logical volume.""" - try: - self._try_execute('sudo', 'lvdisplay', - '%s/%s' % - (FLAGS.volume_group, - volume['name'])) - except Exception as e: + if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True - - # zero out old volumes to prevent data leaking between users - # TODO(ja): reclaiming space should be done lazy and low priority - self._execute('sudo', 'dd', 'if=/dev/zero', - 'of=%s' % self.local_path(volume), - 'count=%d' % (volume['size'] * 1024), - 'bs=1M') - self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, - volume['name'])) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -612,13 +622,9 @@ class SheepdogDriver(VolumeDriver): def create_volume(self, volume): """Creates a sheepdog volume""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] self._try_execute('qemu-img', 'create', "sheepdog:%s" % volume['name'], - sizestr) + self._sizestr(volume['size'])) def create_volume_from_snapshot(self, volume, snapshot): """Creates a sheepdog volume from a snapshot.""" -- cgit From 03c735bb186a44d80a9d595e00e9c06fd8f709cc Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 17:53:25 +0900 Subject: volume/driver: implement basic snapshot/clone added basic support for snapshot/clone to VolumeDriver. The implementation is not effective, but works. The effective implementation should be done by drived driver class. --- nova/exception.py | 6 ++++++ nova/volume/driver.py | 42 +++++++++++++++++++++++++++++++++++++----- nova/volume/manager.py | 6 ++++++ 3 files changed, 49 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/exception.py b/nova/exception.py index 2dffeb795..6748ef265 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -79,6 +79,12 @@ class VolumeNotFound(NotFound): super(VolumeNotFound, self).__init__(message) +class VolumeIsBusy(Error): + def __init__(self, message, volume_id): + self.volume_id = volume_id + super(Error, self).__init__(message) + + class SnapshotNotFound(NotFound): def __init__(self, message, snapshot_id): self.snapshot_id = snapshot_id diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 9591c93d0..457a1c9e6 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -113,13 +113,21 @@ class VolumeDriver(object): # TODO(ja): reclaiming space should be done lazy and low priority self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, volume['name'])) + (FLAGS.volume_group, + self._escape_snapshot(volume['name']))) def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' return '%sG' % size_in_g + # Linux LVM reserves name that starts with snapshot, so that + # such volume name can't be created. Mangle it. 
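The comment above explains why the driver mangles names: LVM refuses logical volumes whose names begin with 'snapshot'. The _escape_snapshot helper that the hunk adds next implements it; as a quick illustration of the intended behaviour (the volume names are invented):

    def _escape_snapshot(name):
        # Prefix names LVM would refuse; leave everything else untouched.
        if not name.startswith('snapshot'):
            return name
        return '_' + name

    assert _escape_snapshot('volume-0000002a') == 'volume-0000002a'
    assert _escape_snapshot('snapshot-0000002a') == '_snapshot-0000002a'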
+ def _escape_snapshot(self, snapshot_name): + if not snapshot_name.startswith('snapshot'): + return snapshot_name + return '_' + snapshot_name + def create_volume(self, volume): """Creates a logical volume. Can optionally return a Dictionary of changes to the volume object to be persisted.""" @@ -127,27 +135,51 @@ class VolumeDriver(object): def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" - raise NotImplementedError() + self._create_volume(volume['name'], self._sizestr(volume['size'])) + self._copy_volume(self.local_path(snapshot), self.local_path(volume), + snapshot['volume_size']) def delete_volume(self, volume): """Deletes a logical volume.""" if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True + + # TODO(yamahata): lvm can't delete origin volume only without + # deleting derived snapshots. Can we do something fancy? + out, err = self._execute('sudo', 'lvdisplay', '--noheading', + '-C', '-o', 'Attr', + '%s/%s' % (FLAGS.volume_group, + volume['name'])) + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy( + _('deleting volume %s that has snapshot'), volume['name']) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" - raise NotImplementedError() + orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) + self._try_execute('sudo', 'lvcreate', '-L', + self._sizestr(snapshot['volume_size']), + '--name', self._escape_snapshot(snapshot['name']), + '--snapshot', orig_lv_name) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - raise NotImplementedError() + if self._volume_not_present(self._escape_snapshot(snapshot['name'])): + # If the snapshot isn't present, then don't attempt to delete + return True + + # TODO(yamahata): zeroing out the whole snapshot triggers COW. + # it's quite slow. 
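delete_volume above refuses to remove an origin volume by reading the LVM attribute string from lvdisplay -C -o Attr: a leading 'o' or 'O' marks a volume that still has snapshots derived from it. A small sketch of that parse, with made-up lvdisplay-style output:

    def has_dependent_snapshots(lvdisplay_output):
        # The first character of the attribute string is the volume type;
        # origin volumes (ones with snapshots) show 'o' or 'O' there.
        attr = lvdisplay_output.strip()
        return attr[0] in ('o', 'O')

    assert has_dependent_snapshots('  owi-aos--')      # origin with snapshots
    assert not has_dependent_snapshots('  -wi-ao---')  # plain volume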
+ self._delete_volume(snapshot, snapshot['volume_size']) def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') - escaped_name = volume['name'].replace('-', '--') + escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def ensure_export(self, context, volume): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 7d47fc191..84085fbd8 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -147,6 +147,12 @@ class VolumeManager(manager.SchedulerDependentManager): self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) + except exception.VolumeIsBusy, e: + LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) + self.driver.ensure_export(context, volume_ref) + self.db.volume_update(context, volume_ref['id'], + {'status': 'available'}) + return True except Exception: self.db.volume_update(context, volume_ref['id'], -- cgit From db148f108dfc4829e1302a54fe4f57ab81212786 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 19:25:02 +0900 Subject: fix mismerge by 1059 --- nova/db/sqlalchemy/api.py | 3 +-- nova/exception.py | 65 ++++++----------------------------------------- nova/volume/driver.py | 3 +-- 3 files changed, 10 insertions(+), 61 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ebdb2ad5c..7302f25b0 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1798,8 +1798,7 @@ def snapshot_get(context, snapshot_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, - snapshot_id) + raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result diff --git a/nova/exception.py b/nova/exception.py index 6748ef265..b16ea6810 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -60,65 +60,8 @@ class ApiError(Error): class BuildInProgress(Error): - super(ApiError, self).__init__('%s: %s' % (code, message)) - - -class NotFound(Error): - pass - - -class InstanceNotFound(NotFound): - def __init__(self, message, instance_id): - self.instance_id = instance_id - super(InstanceNotFound, self).__init__(message) - - -class VolumeNotFound(NotFound): - def __init__(self, message, volume_id): - self.volume_id = volume_id - super(VolumeNotFound, self).__init__(message) - - -class VolumeIsBusy(Error): - def __init__(self, message, volume_id): - self.volume_id = volume_id - super(Error, self).__init__(message) - - -class SnapshotNotFound(NotFound): - def __init__(self, message, snapshot_id): - self.snapshot_id = snapshot_id - super(SnapshotNotFound, self).__init__(message) - - -class Duplicate(Error): pass - -class NotAuthorized(Error): - pass - - -class NotEmpty(Error): - pass - - -class Invalid(Error): - pass - - -class InvalidInputException(Error): - pass - - -class InvalidContentType(Error): - pass - - -class TimeoutException(Error): - pass - - class DBError(Error): """Wraps an implementation specific exception.""" def __init__(self, inner_exception): @@ -319,6 +262,14 @@ class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") +class SnapshotNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s not found") + + +class VolumeIsBusy(Error): + message = _("deleting volume %(volume_name)s that has 
snapshot") + + class ExportDeviceNotFoundForVolume(NotFound): message = _("No export device found for volume %(volume_id)s.") diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 457a1c9e6..e783d3a5a 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -153,8 +153,7 @@ class VolumeDriver(object): volume['name'])) out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): - raise exception.VolumeIsBusy( - _('deleting volume %s that has snapshot'), volume['name']) + raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume, volume['size']) -- cgit From c5dbee818b1a06bf5358c32197c8e15ecf0f660d Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 20:19:35 +0900 Subject: db: fix db versioning --- .../versions/015_add_volume_snapshot_support.py | 71 ---------------------- .../versions/016_add_snapshot_id_to_volumes.py | 48 --------------- .../versions/016_add_volume_snapshot_support.py | 71 ++++++++++++++++++++++ .../versions/017_add_snapshot_id_to_volumes.py | 48 +++++++++++++++ 4 files changed, 119 insertions(+), 119 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py deleted file mode 100644 index 288f63e72..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py +++ /dev/null @@ -1,71 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 MORITA Kazutaka. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import * -from migrate import * - -from nova import log as logging - -meta = MetaData() - -snapshots = Table('snapshots', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('volume_id', Integer(), nullable=False), - Column('user_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('progress', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('volume_size', Integer()), - Column('scheduled_at', DateTime(timezone=False)), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - ) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - try: - snapshots.create() - except Exception: - logging.info(repr(snapshots)) - logging.exception('Exception while creating table') - meta.drop_all(tables=[snapshots]) - raise - - -def downgrade(migrate_engine): - # Operations to reverse the above upgrade go here. - snapshots.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py deleted file mode 100644 index 0a50123bf..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_add_snapshot_id_to_volumes.py +++ /dev/null @@ -1,48 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 MORITA Kazutaka. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - - -# Table stub-definitions -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -# -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Column -# - -snapshot_id = Column('snapshot_id', Integer()) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - # Add columns to existing tables - volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + snapshots.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..0a50123bf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_add_snapshot_id_to_volumes.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. 
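The renaming above (015 -> 016, 016 -> 017) is needed because sqlalchemy-migrate runs scripts strictly in order of their numeric prefix, so two branches both claiming the same numbers force one side to shift its scripts up. For reference, the minimal shape of such a versioned script, an add-column upgrade with a matching downgrade; the table stub and column name here are placeholders, not part of this patch:

    from sqlalchemy import *
    from migrate import *

    meta = MetaData()

    # Stub definition: just enough of the table for the column operation.
    volumes = Table('volumes', meta,
                    Column('id', Integer(), primary_key=True, nullable=False))

    example_id = Column('example_id', Integer())

    def upgrade(migrate_engine):
        meta.bind = migrate_engine
        volumes.create_column(example_id)

    def downgrade(migrate_engine):
        meta.bind = migrate_engine
        volumes.drop_column('example_id')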
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) -- cgit From 1c1a06c3731dd82b331f317ba52edbfe2110a40e Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 9 May 2011 11:47:33 -0400 Subject: clean up unused functions from virt/images.py --- nova/virt/images.py | 61 ----------------------------------------------------- 1 file changed, 61 deletions(-) (limited to 'nova') diff --git a/nova/virt/images.py b/nova/virt/images.py index 2e3f2ee4d..1eb6f4a5f 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -51,67 +51,6 @@ def fetch(image_id, path, _user, _project): metadata = image_service.get(elevated, image_id, image_file) return metadata - -# NOTE(vish): The methods below should be unnecessary, but I'm leaving -# them in case the glance client does not work on windows. -def _fetch_image_no_curl(url, path, headers): - request = urllib2.Request(url) - for (k, v) in headers.iteritems(): - request.add_header(k, v) - - def urlretrieve(urlfile, fpath): - chunk = 1 * 1024 * 1024 - f = open(fpath, "wb") - while 1: - data = urlfile.read(chunk) - if not data: - break - f.write(data) - - urlopened = urllib2.urlopen(request) - urlretrieve(urlopened, path) - LOG.debug(_("Finished retreving %(url)s -- placed in %(path)s") % locals()) - - -def _fetch_s3_image(image, path, user, project): - url = image_url(image) - - # This should probably move somewhere else, like e.g. a download_as - # method on User objects and at the same time get rewritten to use - # a web client. 
- headers = {} - headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - - (_, _, url_path, _, _, _) = urlparse.urlparse(url) - access = manager.AuthManager().get_access_key(user, project) - signature = signer.Signer(user.secret.encode()).s3_authorization(headers, - 'GET', - url_path) - headers['Authorization'] = 'AWS %s:%s' % (access, signature) - - if sys.platform.startswith('win'): - return _fetch_image_no_curl(url, path, headers) - else: - cmd = ['/usr/bin/curl', '--fail', '--silent', url] - for (k, v) in headers.iteritems(): - cmd += ['-H', '\'%s: %s\'' % (k, v)] - - cmd += ['-o', path] - return utils.execute(*cmd) - - -def _fetch_local_image(image, path, user, project): - source = _image_path(os.path.join(image, 'image')) - if sys.platform.startswith('win'): - return shutil.copy(source, path) - else: - return utils.execute('cp', source, path) - - -def _image_path(path): - return os.path.join(FLAGS.images_path, path) - - # TODO(vish): xenapi should use the glance client code directly instead # of retrieving the image using this method. def image_url(image): -- cgit From 4364c3e4103e41fcb8bb0c2af764c37c1ff4afab Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 9 May 2011 16:52:52 -0500 Subject: Better message format description --- nova/notifier/__init__.py | 40 +++++++++++++++++++++++++++++++++++++--- nova/notifier/rabbit_notifier.py | 6 +++--- 2 files changed, 40 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py index 8053b8a0a..e6a4a0165 100644 --- a/nova/notifier/__init__.py +++ b/nova/notifier/__init__.py @@ -13,12 +13,46 @@ # License for the specific language governing permissions and limitations # under the License. +import datetime +import json + from nova import flags from nova import utils FLAGS = flags.FLAGS -def notify(event_name, model): - """Sends a notification using the specified driver""" +flags.DEFINE_string('default_notification_level', 'info', + 'Default notification level for outgoing notifications') + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + +def notify(event_name, publisher_id, event_type, priority, payload): + """ + Sends a notification using the specified driver + + Message format is as follows: + + publisher_id - the source worker_type.host of the message + timestamp - the GMT timestamp the notification was sent at + event_type - the literal type of event (ex. Instance Creation) + priority - patterned after the enumeration of Python logging levels in + the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + payload - A python dictionary of attributes + + The payload will be constructed as a dictionary of the above attributes, + and converted into a JSON dump, which will then be sent via the transport + mechanism defined by the driver. 
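The docstring above fixes the message contract: publisher_id, timestamp, event_type, priority and a payload dictionary, serialized to JSON before it reaches the driver. A quick sketch of assembling such a message by hand; the values are invented, and the timestamp is stringified here so the standard json module can serialize it:

    import datetime
    import json

    message = dict(publisher_id='compute.host1',
                   event_type='compute.create_instance',
                   priority='INFO',
                   payload={'instance_id': 12},
                   timestamp=str(datetime.datetime.utcnow()))

    body = json.dumps(message)
    assert json.loads(body)['priority'] == 'INFO'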
+ """ driver = utils.import_class(FLAGS.notification_driver)() - driver.notify(event_name, model) + message = dict(publisher_id=publisher_id, + event_type=event_type, + priority=priority, + payload=payload, + time=datetime.datetime.utcnow()) + driver.notify(json.dumps(message)) diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py index 33cf06566..e4bd85398 100644 --- a/nova/notifier/rabbit_notifier.py +++ b/nova/notifier/rabbit_notifier.py @@ -25,13 +25,13 @@ FLAGS = flags.FLAGS flags.DEFINE_string('notification_topic', 'notifications', 'RabbitMQ topic used for Nova notifications') + class RabbitNotifier(object): """Sends notifications to a specific RabbitMQ server and topic""" pass - def notify(self, event_name, model): + def notify(self, payload): """Sends a notification to the RabbitMQ""" context = nova.context.get_admin_context() topic = FLAGS.notification_topic - msg = { 'event_name': event_name, 'model': model.__dict__ } - rpc.cast(context, topic, json.dumps(msg)) + rpc.cast(context, topic, msg) -- cgit From 09b795b8d6f0b925dbd4bcd203f471607c42f368 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 9 May 2011 19:46:15 -0400 Subject: got rid of unnecessary imports --- nova/virt/images.py | 9 --------- 1 file changed, 9 deletions(-) (limited to 'nova') diff --git a/nova/virt/images.py b/nova/virt/images.py index 1eb6f4a5f..8689c0ed3 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -21,19 +21,10 @@ Handling of VM disk images. """ -import os.path -import shutil -import sys -import time -import urllib2 -import urlparse - from nova import context from nova import flags from nova import log as logging from nova import utils -from nova.auth import manager -from nova.auth import signer FLAGS = flags.FLAGS -- cgit From 288030b2b9834ca65e822a770f1b2d052ee27a10 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 10 May 2011 14:40:28 -0500 Subject: Test --- nova/tests/test_compute.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'nova') diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 393110791..a35132426 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -329,6 +329,32 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(self.context, instance_id) + def test_finish_resize(self): + """Contrived test to ensure finish_resize doesn't raise anything""" + + def fake(*args, **kwargs): pass + + self.stubs.Set(self.compute.driver, 'finish_resize', fake) + self.stubs.Set(self.compute.driver, 'finish_resize', fake) + context = self.context.elevated() + instance_id = self._create_instance() + self.compute.prep_resize(context, instance_id, 1) + migration_ref = db.migration_get_by_instance_and_status(context, + instance_id, 'pre-migrating') + try: + self.compute.finish_resize(context, instance_id, + int(migration_ref['id']), {}) + except KeyError, e: + # Only catch key errors. 
We want other reasons for the test to + # fail to actually error out so we don't obscure anything + self.fail() + + self.compute.terminate_instance(self.context, instance_id) + + def test_resize_instance(self): + """Ensure instance can be migrated/resized""" + instance_id = self._create_instance() + def test_resize_instance(self): """Ensure instance can be migrated/resized""" instance_id = self._create_instance() -- cgit From 64f9fdc15f744c2646f6f4a519cf0f0df2845239 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 10 May 2011 14:53:03 -0500 Subject: Pep8 --- nova/tests/test_compute.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index a35132426..9926e1ca3 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -332,7 +332,8 @@ class ComputeTestCase(test.TestCase): def test_finish_resize(self): """Contrived test to ensure finish_resize doesn't raise anything""" - def fake(*args, **kwargs): pass + def fake(*args, **kwargs): + pass self.stubs.Set(self.compute.driver, 'finish_resize', fake) self.stubs.Set(self.compute.driver, 'finish_resize', fake) @@ -350,10 +351,10 @@ class ComputeTestCase(test.TestCase): self.fail() self.compute.terminate_instance(self.context, instance_id) - - def test_resize_instance(self): - """Ensure instance can be migrated/resized""" - instance_id = self._create_instance() + + def test_resize_instance(self): + """Ensure instance can be migrated/resized""" + instance_id = self._create_instance() def test_resize_instance(self): """Ensure instance can be migrated/resized""" -- cgit From 19f5d2a938ffa4c7bcba849766d2450eaecc94eb Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 10 May 2011 14:57:44 -0500 Subject: Whoops --- nova/tests/test_compute.py | 4 ---- 1 file changed, 4 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9926e1ca3..1b0e66bef 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -352,10 +352,6 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(self.context, instance_id) - def test_resize_instance(self): - """Ensure instance can be migrated/resized""" - instance_id = self._create_instance() - def test_resize_instance(self): """Ensure instance can be migrated/resized""" instance_id = self._create_instance() -- cgit From 3d756a8343845acfead201621a6d658c8ac616fb Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 10 May 2011 15:42:00 -0500 Subject: Add example --- nova/notifier/__init__.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py index e6a4a0165..aacbf8ac3 100644 --- a/nova/notifier/__init__.py +++ b/nova/notifier/__init__.py @@ -48,11 +48,20 @@ def notify(event_name, publisher_id, event_type, priority, payload): The payload will be constructed as a dictionary of the above attributes, and converted into a JSON dump, which will then be sent via the transport mechanism defined by the driver. + + Message example: + + { 'publisher_id': 'compute.host1', + 'timestamp': '2011-05-09 22:00:14.621831', + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... 
}} + """ driver = utils.import_class(FLAGS.notification_driver)() message = dict(publisher_id=publisher_id, event_type=event_type, priority=priority, payload=payload, - time=datetime.datetime.utcnow()) + time=str(datetime.datetime.utcnow())) driver.notify(json.dumps(message)) -- cgit From fa3195b6206cffc26d421db891e1a580a18f0fb0 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 10 May 2011 16:40:47 -0500 Subject: Better tests --- nova/notifier/__init__.py | 5 +++++ nova/notifier/rabbit_notifier.py | 2 +- nova/tests/test_notifier.py | 40 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 44 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py index aacbf8ac3..942c1a1a2 100644 --- a/nova/notifier/__init__.py +++ b/nova/notifier/__init__.py @@ -32,6 +32,9 @@ DEBUG = 'DEBUG' log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) +class BadPriorityException(Exception): + pass + def notify(event_name, publisher_id, event_type, priority, payload): """ Sends a notification using the specified driver @@ -58,6 +61,8 @@ def notify(event_name, publisher_id, event_type, priority, payload): 'payload': {'instance_id': 12, ... }} """ + if priority not in log_levels: + raise BadPriorityException('%s not in valid priorities' % priority) driver = utils.import_class(FLAGS.notification_driver)() message = dict(publisher_id=publisher_id, event_type=event_type, diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py index e4bd85398..1d62005a8 100644 --- a/nova/notifier/rabbit_notifier.py +++ b/nova/notifier/rabbit_notifier.py @@ -34,4 +34,4 @@ class RabbitNotifier(object): """Sends a notification to the RabbitMQ""" context = nova.context.get_admin_context() topic = FLAGS.notification_topic - rpc.cast(context, topic, msg) + rpc.cast(context, topic, payload) diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 4d6289e6a..396ce13b1 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. 
+import json + import nova from nova import flags @@ -42,9 +44,27 @@ class NotifierTestCase(test.TestCase): class Mock(object): pass - notifier.notify('derp', Mock()) + nova.notifier.notify('event_name', 'publisher_id', 'event_type', + nova.notifier.WARN, dict(a=3)) self.assertEqual(self.notify_called, True) + def test_verify_message_format(self): + """A test to ensure changing the message format is prohibitively + annoying""" + def message_assert(cls, blob): + message = json.loads(blob) + fields = [ ('publisher_id', 'publisher_id'), + ('event_type', 'event_type'), + ('priority', 'WARN'), + ('payload', dict(a=3))] + for k, v in fields: + self.assertEqual(message[k], v) + + self.stubs.Set(nova.notifier.no_op_notifier.NoopNotifier, 'notify', + message_assert) + nova.notifier.notify('event_name', 'publisher_id', 'event_type', + nova.notifier.WARN, dict(a=3)) + def test_send_rabbit_notification(self): self.stubs.Set(nova.flags.FLAGS, 'notification_driver', 'nova.notifier.rabbit_notifier.RabbitNotifier') @@ -55,6 +75,22 @@ class NotifierTestCase(test.TestCase): class Mock(object): pass self.stubs.Set(nova.rpc, 'cast', mock_cast) - notifier.notify('derp', Mock()) + nova.notifier.notify('event_name', 'publisher_id', 'event_type', + nova.notifier.WARN, dict(a=3)) self.assertEqual(self.mock_cast, True) + + def test_invalid_priority(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier.RabbitNotifier') + self.mock_cast = False + def mock_cast(cls, *args): + pass + + class Mock(object): + pass + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + self.assertRaises(nova.notifier.BadPriorityException, + nova.notifier.notify, 'event_name', 'publisher_id', + 'event_type', 'not a priority', dict(a=3)) -- cgit From 351c07f43c8ee072b0351973db9b5b9bd1656571 Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Tue, 10 May 2011 23:29:16 +0000 Subject: Add priority based queues to notifications. Remove duplicate json encoding in notifier (rpc.cast does encoding... ) make no_op_notifier match rabbit one for signature on notify() --- nova/notifier/__init__.py | 5 ++--- nova/notifier/no_op_notifier.py | 2 +- nova/notifier/rabbit_notifier.py | 7 ++++--- nova/tests/test_notifier.py | 29 +++++++++++++++++++++++------ 4 files changed, 30 insertions(+), 13 deletions(-) (limited to 'nova') diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py index 942c1a1a2..6429ea965 100644 --- a/nova/notifier/__init__.py +++ b/nova/notifier/__init__.py @@ -14,14 +14,13 @@ # under the License. import datetime -import json from nova import flags from nova import utils FLAGS = flags.FLAGS -flags.DEFINE_string('default_notification_level', 'info', +flags.DEFINE_string('default_notification_level', 'INFO', 'Default notification level for outgoing notifications') WARN = 'WARN' @@ -69,4 +68,4 @@ def notify(event_name, publisher_id, event_type, priority, payload): priority=priority, payload=payload, time=str(datetime.datetime.utcnow())) - driver.notify(json.dumps(message)) + driver.notify(message) diff --git a/nova/notifier/no_op_notifier.py b/nova/notifier/no_op_notifier.py index 3fefe6f8f..f425f06ea 100644 --- a/nova/notifier/no_op_notifier.py +++ b/nova/notifier/no_op_notifier.py @@ -14,6 +14,6 @@ # under the License. 
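[Editor's note: the rabbit driver change later in this commit routes each message to a per-priority topic. The function below is only an illustrative sketch of that naming scheme; the defaults mirror the notification_topic and default_notification_level flags defined in this patch series.]

def topic_for(message, base_topic='notifications', default_level='INFO'):
    # Priority falls back to the default notification level and is
    # lowercased to build the routing suffix.
    priority = message.get('priority', default_level).lower()
    return '%s.%s' % (base_topic, priority)

# topic_for({'priority': 'DEBUG'}) returns 'notifications.debug'
# topic_for({})                    returns 'notifications.info'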
class NoopNotifier(object): - def notify(self, event_name, model): + def notify(self, payload): """Notifies the recipient of the desired event given the model""" pass diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py index 1d62005a8..4b6538696 100644 --- a/nova/notifier/rabbit_notifier.py +++ b/nova/notifier/rabbit_notifier.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import json import nova.context @@ -28,10 +27,12 @@ flags.DEFINE_string('notification_topic', 'notifications', class RabbitNotifier(object): """Sends notifications to a specific RabbitMQ server and topic""" - pass def notify(self, payload): """Sends a notification to the RabbitMQ""" context = nova.context.get_admin_context() - topic = FLAGS.notification_topic + priority = payload.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + topic = '%s.%s' % (FLAGS.notification_topic, priority) rpc.cast(context, topic, payload) diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 396ce13b1..8fc43d9de 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,13 +13,14 @@ # License for the specific language governing permissions and limitations # under the License. -import json - import nova +from nova import context from nova import flags +from nova import rpc from nova import notifier from nova.notifier import no_op_notifier +from nova.notifier import rabbit_notifier from nova import test import stubout @@ -51,8 +52,7 @@ class NotifierTestCase(test.TestCase): def test_verify_message_format(self): """A test to ensure changing the message format is prohibitively annoying""" - def message_assert(cls, blob): - message = json.loads(blob) + def message_assert(cls, message): fields = [ ('publisher_id', 'publisher_id'), ('event_type', 'event_type'), ('priority', 'WARN'), @@ -71,7 +71,7 @@ class NotifierTestCase(test.TestCase): self.mock_cast = False def mock_cast(cls, *args): self.mock_cast = True - + class Mock(object): pass self.stubs.Set(nova.rpc, 'cast', mock_cast) @@ -86,7 +86,7 @@ class NotifierTestCase(test.TestCase): self.mock_cast = False def mock_cast(cls, *args): pass - + class Mock(object): pass @@ -94,3 +94,20 @@ class NotifierTestCase(test.TestCase): self.assertRaises(nova.notifier.BadPriorityException, nova.notifier.notify, 'event_name', 'publisher_id', 'event_type', 'not a priority', dict(a=3)) + + def test_rabbit_priority_queue(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier.RabbitNotifier') + self.stubs.Set(nova.flags.FLAGS, 'notification_topic', + 'testnotify') + + self.test_topic = None + + def mock_cast(context, topic, msg): + self.test_topic = topic + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + nova.notifier.notify('event_name', 'publisher_id', + 'event_type', 'DEBUG', dict(a=3)) + self.assertEqual(self.test_topic, 'testnotify.debug') + -- cgit From e1dc9cfb521f21dd0cdd4d9771d78ef5024cebad Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Tue, 10 May 2011 23:57:38 +0000 Subject: added in log_notifier for easier debugging --- nova/notifier/log_notifier.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 nova/notifier/log_notifier.py (limited to 'nova') diff --git a/nova/notifier/log_notifier.py b/nova/notifier/log_notifier.py new file mode 100644 index 000000000..05126b591 --- /dev/null +++ b/nova/notifier/log_notifier.py @@ -0,0 +1,33 @@ +# 
Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from nova import flags +from nova import log as logging + +FLAGS = flags.FLAGS + +class LogNotifier(object): + """ log notifications using nova's default logging system """ + + def notify(self, payload): + """Notifies the recipient of the desired event given the model""" + priority = payload.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + logger = logging.getLogger('nova.notification.%s' % payload['event_type']) + getattr(logger, priority)(json.dumps(payload)) + -- cgit From 2e44facea2f7b2c12dec9a14ea3595aadd8a35fc Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 11 May 2011 10:40:54 -0500 Subject: Code cleanup --- nova/notifier/__init__.py | 22 +++++++++++++--------- nova/tests/test_notifier.py | 12 +++++------- 2 files changed, 18 insertions(+), 16 deletions(-) (limited to 'nova') diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py index 942c1a1a2..58809f179 100644 --- a/nova/notifier/__init__.py +++ b/nova/notifier/__init__.py @@ -15,6 +15,7 @@ import datetime import json +import uuid from nova import flags from nova import utils @@ -41,6 +42,7 @@ def notify(event_name, publisher_id, event_type, priority, payload): Message format is as follows: + message_id - a UUID representing the id for this notification publisher_id - the source worker_type.host of the message timestamp - the GMT timestamp the notification was sent at event_type - the literal type of event (ex. Instance Creation) @@ -48,23 +50,25 @@ def notify(event_name, publisher_id, event_type, priority, payload): the set (DEBUG, WARN, INFO, ERROR, CRITICAL) payload - A python dictionary of attributes - The payload will be constructed as a dictionary of the above attributes, - and converted into a JSON dump, which will then be sent via the transport - mechanism defined by the driver. + The message body will be constructed as a dictionary of the above + attributes, and converted into a JSON dump, which will then be sent + via the transport mechanism defined by the driver. Message example: - { 'publisher_id': 'compute.host1', - 'timestamp': '2011-05-09 22:00:14.621831', - 'priority': 'WARN', - 'event_type': 'compute.create_instance', - 'payload': {'instance_id': 12, ... }} + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': datetime.datetime.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... 
}} """ if priority not in log_levels: raise BadPriorityException('%s not in valid priorities' % priority) driver = utils.import_class(FLAGS.notification_driver)() - message = dict(publisher_id=publisher_id, + message = dict(message_id=str(uuid.uuid4()), + publisher_id=publisher_id, event_type=event_type, priority=priority, payload=payload, diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 396ce13b1..640a0cb34 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -53,12 +53,13 @@ class NotifierTestCase(test.TestCase): annoying""" def message_assert(cls, blob): message = json.loads(blob) - fields = [ ('publisher_id', 'publisher_id'), - ('event_type', 'event_type'), - ('priority', 'WARN'), - ('payload', dict(a=3))] + fields = [('publisher_id', 'publisher_id'), + ('event_type', 'event_type'), + ('priority', 'WARN'), + ('payload', dict(a=3))] for k, v in fields: self.assertEqual(message[k], v) + self.assertTrue(len(message['message_id']) > 0) self.stubs.Set(nova.notifier.no_op_notifier.NoopNotifier, 'notify', message_assert) @@ -81,9 +82,6 @@ class NotifierTestCase(test.TestCase): self.assertEqual(self.mock_cast, True) def test_invalid_priority(self): - self.stubs.Set(nova.flags.FLAGS, 'notification_driver', - 'nova.notifier.rabbit_notifier.RabbitNotifier') - self.mock_cast = False def mock_cast(cls, *args): pass -- cgit From 96f59724eaf57c8eae57b853484137de5fff672c Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 11 May 2011 13:10:40 -0500 Subject: Moved everything into notifier/api --- nova/notifier/__init__.py | 61 ------------------------------------ nova/notifier/api.py | 75 +++++++++++++++++++++++++++++++++++++++++++++ nova/tests/test_notifier.py | 22 +++++++------ 3 files changed, 87 insertions(+), 71 deletions(-) create mode 100644 nova/notifier/api.py (limited to 'nova') diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py index 0d4c970df..482d54e4f 100644 --- a/nova/notifier/__init__.py +++ b/nova/notifier/__init__.py @@ -12,64 +12,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -import datetime -import uuid - -from nova import flags -from nova import utils - -FLAGS = flags.FLAGS - -flags.DEFINE_string('default_notification_level', 'INFO', - 'Default notification level for outgoing notifications') - -WARN = 'WARN' -INFO = 'INFO' -ERROR = 'ERROR' -CRITICAL = 'CRITICAL' -DEBUG = 'DEBUG' - -log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) - -class BadPriorityException(Exception): - pass - -def notify(event_name, publisher_id, event_type, priority, payload): - """ - Sends a notification using the specified driver - - Message format is as follows: - - message_id - a UUID representing the id for this notification - publisher_id - the source worker_type.host of the message - timestamp - the GMT timestamp the notification was sent at - event_type - the literal type of event (ex. Instance Creation) - priority - patterned after the enumeration of Python logging levels in - the set (DEBUG, WARN, INFO, ERROR, CRITICAL) - payload - A python dictionary of attributes - - The message body will be constructed as a dictionary of the above - attributes, and converted into a JSON dump, which will then be sent - via the transport mechanism defined by the driver. 
- - Message example: - - {'message_id': str(uuid.uuid4()), - 'publisher_id': 'compute.host1', - 'timestamp': datetime.datetime.utcnow(), - 'priority': 'WARN', - 'event_type': 'compute.create_instance', - 'payload': {'instance_id': 12, ... }} - - """ - if priority not in log_levels: - raise BadPriorityException('%s not in valid priorities' % priority) - driver = utils.import_class(FLAGS.notification_driver)() - message = dict(message_id=str(uuid.uuid4()), - publisher_id=publisher_id, - event_type=event_type, - priority=priority, - payload=payload, - time=str(datetime.datetime.utcnow())) - driver.notify(message) diff --git a/nova/notifier/api.py b/nova/notifier/api.py new file mode 100644 index 000000000..04da8153e --- /dev/null +++ b/nova/notifier/api.py @@ -0,0 +1,75 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.import datetime + +import datetime +import uuid + +from nova import flags +from nova import utils + +FLAGS = flags.FLAGS + +flags.DEFINE_string('default_notification_level', 'INFO', + 'Default notification level for outgoing notifications') + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + +class BadPriorityException(Exception): + pass + +def notify(event_name, publisher_id, event_type, priority, payload): + """ + Sends a notification using the specified driver + + Message format is as follows: + + message_id - a UUID representing the id for this notification + publisher_id - the source worker_type.host of the message + timestamp - the GMT timestamp the notification was sent at + event_type - the literal type of event (ex. Instance Creation) + priority - patterned after the enumeration of Python logging levels in + the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + payload - A python dictionary of attributes + + The message body will be constructed as a dictionary of the above + attributes, and converted into a JSON dump, which will then be sent + via the transport mechanism defined by the driver. + + Message example: + + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': datetime.datetime.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... 
}} + + """ + if priority not in log_levels: + raise BadPriorityException('%s not in valid priorities' % priority) + driver = utils.import_class(FLAGS.notification_driver)() + message = dict(message_id=str(uuid.uuid4()), + publisher_id=publisher_id, + event_type=event_type, + priority=priority, + payload=payload, + timestamp=str(datetime.datetime.utcnow())) + driver.notify(message) diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index d2964c42f..64ec1dec5 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -18,7 +18,8 @@ import nova from nova import context from nova import flags from nova import rpc -from nova import notifier +import nova.notifier.api +from nova.notifier.api import notify from nova.notifier import no_op_notifier from nova.notifier import rabbit_notifier from nova import test @@ -45,8 +46,8 @@ class NotifierTestCase(test.TestCase): class Mock(object): pass - nova.notifier.notify('event_name', 'publisher_id', 'event_type', - nova.notifier.WARN, dict(a=3)) + notify('event_name', 'publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_called, True) def test_verify_message_format(self): @@ -60,11 +61,12 @@ class NotifierTestCase(test.TestCase): for k, v in fields: self.assertEqual(message[k], v) self.assertTrue(len(message['message_id']) > 0) + self.assertTrue(len(message['timestamp']) > 0) self.stubs.Set(nova.notifier.no_op_notifier.NoopNotifier, 'notify', message_assert) - nova.notifier.notify('event_name', 'publisher_id', 'event_type', - nova.notifier.WARN, dict(a=3)) + notify('event_name', 'publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) def test_send_rabbit_notification(self): self.stubs.Set(nova.flags.FLAGS, 'notification_driver', @@ -76,8 +78,8 @@ class NotifierTestCase(test.TestCase): class Mock(object): pass self.stubs.Set(nova.rpc, 'cast', mock_cast) - nova.notifier.notify('event_name', 'publisher_id', 'event_type', - nova.notifier.WARN, dict(a=3)) + notify('event_name', 'publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.mock_cast, True) @@ -89,8 +91,8 @@ class NotifierTestCase(test.TestCase): pass self.stubs.Set(nova.rpc, 'cast', mock_cast) - self.assertRaises(nova.notifier.BadPriorityException, - nova.notifier.notify, 'event_name', 'publisher_id', + self.assertRaises(nova.notifier.api.BadPriorityException, + notify, 'event_name', 'publisher_id', 'event_type', 'not a priority', dict(a=3)) def test_rabbit_priority_queue(self): @@ -105,7 +107,7 @@ class NotifierTestCase(test.TestCase): self.test_topic = topic self.stubs.Set(nova.rpc, 'cast', mock_cast) - nova.notifier.notify('event_name', 'publisher_id', + notify('event_name', 'publisher_id', 'event_type', 'DEBUG', dict(a=3)) self.assertEqual(self.test_topic, 'testnotify.debug') -- cgit From 4d18824aee8598473ba2c05b23466ac7be199dc7 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 11 May 2011 13:22:55 -0500 Subject: Pep8 stuff --- nova/notifier/api.py | 2 ++ nova/notifier/log_notifier.py | 5 +++-- nova/notifier/no_op_notifier.py | 3 +++ nova/notifier/rabbit_notifier.py | 2 +- nova/tests/test_notifier.py | 16 ++++++++++------ 5 files changed, 19 insertions(+), 9 deletions(-) (limited to 'nova') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 04da8153e..7090af5f4 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -32,9 +32,11 @@ DEBUG = 'DEBUG' log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + class BadPriorityException(Exception): pass + def 
notify(event_name, publisher_id, event_type, priority, payload): """ Sends a notification using the specified driver diff --git a/nova/notifier/log_notifier.py b/nova/notifier/log_notifier.py index 05126b591..4f99c589a 100644 --- a/nova/notifier/log_notifier.py +++ b/nova/notifier/log_notifier.py @@ -20,6 +20,7 @@ from nova import log as logging FLAGS = flags.FLAGS + class LogNotifier(object): """ log notifications using nova's default logging system """ @@ -28,6 +29,6 @@ class LogNotifier(object): priority = payload.get('priority', FLAGS.default_notification_level) priority = priority.lower() - logger = logging.getLogger('nova.notification.%s' % payload['event_type']) + logger = logging.getLogger( + 'nova.notification.%s' % payload['event_type']) getattr(logger, priority)(json.dumps(payload)) - diff --git a/nova/notifier/no_op_notifier.py b/nova/notifier/no_op_notifier.py index f425f06ea..400216f32 100644 --- a/nova/notifier/no_op_notifier.py +++ b/nova/notifier/no_op_notifier.py @@ -13,7 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. + class NoopNotifier(object): + """A notifier that doesn't actually do anything. Simply a placeholder""" + def notify(self, payload): """Notifies the recipient of the desired event given the model""" pass diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py index 4b6538696..6f0927e9f 100644 --- a/nova/notifier/rabbit_notifier.py +++ b/nova/notifier/rabbit_notifier.py @@ -21,7 +21,7 @@ from nova import rpc FLAGS = flags.FLAGS -flags.DEFINE_string('notification_topic', 'notifications', +flags.DEFINE_string('notification_topic', 'notifications', 'RabbitMQ topic used for Nova notifications') diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 64ec1dec5..b9a74a761 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -26,6 +26,7 @@ from nova import test import stubout + class NotifierTestCase(test.TestCase): """Test case for notifications""" def setUp(self): @@ -38,6 +39,7 @@ class NotifierTestCase(test.TestCase): def test_send_notification(self): self.notify_called = False + def mock_notify(cls, *args): self.notify_called = True @@ -52,7 +54,8 @@ class NotifierTestCase(test.TestCase): def test_verify_message_format(self): """A test to ensure changing the message format is prohibitively - annoying""" + annoying""" + def message_assert(cls, message): fields = [('publisher_id', 'publisher_id'), ('event_type', 'event_type'), @@ -72,12 +75,14 @@ class NotifierTestCase(test.TestCase): self.stubs.Set(nova.flags.FLAGS, 'notification_driver', 'nova.notifier.rabbit_notifier.RabbitNotifier') self.mock_cast = False + def mock_cast(cls, *args): self.mock_cast = True class Mock(object): pass - self.stubs.Set(nova.rpc, 'cast', mock_cast) + + self.stubs.Set(nova.rpc, 'cast', mock_cast) notify('event_name', 'publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) @@ -90,8 +95,8 @@ class NotifierTestCase(test.TestCase): class Mock(object): pass - self.stubs.Set(nova.rpc, 'cast', mock_cast) - self.assertRaises(nova.notifier.api.BadPriorityException, + self.stubs.Set(nova.rpc, 'cast', mock_cast) + self.assertRaises(nova.notifier.api.BadPriorityException, notify, 'event_name', 'publisher_id', 'event_type', 'not a priority', dict(a=3)) @@ -106,8 +111,7 @@ class NotifierTestCase(test.TestCase): def mock_cast(context, topic, msg): self.test_topic = topic - self.stubs.Set(nova.rpc, 'cast', mock_cast) + self.stubs.Set(nova.rpc, 'cast', mock_cast) 
notify('event_name', 'publisher_id', 'event_type', 'DEBUG', dict(a=3)) self.assertEqual(self.test_topic, 'testnotify.debug') - -- cgit From 81a65d46d261cb6998c6d714ca5769a661ad31ab Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Wed, 11 May 2011 15:17:14 -0400 Subject: make instance.instance_type_id an integer to support joins in postgres --- .../016_make_instance_type_id_an_integer.py | 61 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 +- 2 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py new file mode 100644 index 000000000..5d95c1024 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py @@ -0,0 +1,61 @@ +from sqlalchemy import Column, Integer, MetaData, String, Table + +meta = MetaData() + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + types = {} + for instance in migrate_engine.execute(instances.select()): + try: + types[instance.id] = int(instance.instance_type_id) + except ValueError: + types[instance.id] = None + + integer_column = Column('instance_type_id_int', Integer(), nullable=True) + string_column = instances.c.instance_type_id + + integer_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_int=instance_type_id) + migrate_engine.execute(update) + + string_column.alter(name='instance_type_id_str') + integer_column.alter(name='instance_type_id') + string_column.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + integer_column = instances.c.instance_type_id + string_column = Column('instance_type_id_str', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + else: + types[instance.id] = str(instance.instance_type_id) + + string_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_str=instance_type_id) + migrate_engine.execute(update) + + integer_column.alter(name='instance_type_id_int') + string_column.alter(name='instance_type_id') + integer_column.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 36a084a1d..486364322 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -209,7 +209,7 @@ class Instance(BASE, NovaBase): hostname = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) - instance_type_id = Column(String(255)) + instance_type_id = Column(Integer) user_data = Column(Text) -- cgit From 8447c6e18e68eeb23175ddafdac1da93c538d734 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 11 May 2011 14:24:01 -0500 Subject: docstring cleanup, nova/network dir --- nova/network/api.py | 45 +++++---- nova/network/linux_net.py | 223 
++++++++++++++++++++++-------------------- nova/network/manager.py | 67 +++++++------ nova/network/vmwareapi_net.py | 14 +-- nova/network/xenapi_net.py | 18 ++-- 5 files changed, 188 insertions(+), 179 deletions(-) (limited to 'nova') diff --git a/nova/network/api.py b/nova/network/api.py index 1d8193b28..e2eacdf42 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -16,9 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all requests relating to instances (guest vms). -""" +"""Handles all requests relating to instances (guest vms).""" from nova import db from nova import exception @@ -28,6 +26,7 @@ from nova import quota from nova import rpc from nova.db import base + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.network') @@ -37,19 +36,19 @@ class API(base.Base): def allocate_floating_ip(self, context): if quota.allowed_floating_ips(context, 1) < 1: - LOG.warn(_("Quota exceeeded for %s, tried to allocate " - "address"), - context.project_id) - raise quota.QuotaError(_("Address quota exceeded. You cannot " - "allocate any more addresses")) + LOG.warn(_('Quota exceeeded for %s, tried to allocate ' + 'address'), + context.project_id) + raise quota.QuotaError(_('Address quota exceeded. You cannot ' + 'allocate any more addresses')) # NOTE(vish): We don't know which network host should get the ip # when we allocate, so just send it to any one. This # will probably need to move into a network supervisor # at some point. return rpc.call(context, FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) + {'method': 'allocate_floating_ip', + 'args': {'project_id': context.project_id}}) def release_floating_ip(self, context, address, affect_auto_assigned=False): @@ -62,8 +61,8 @@ class API(base.Base): # at some point. 
rpc.cast(context, FLAGS.network_topic, - {"method": "deallocate_floating_ip", - "args": {"floating_address": floating_ip['address']}}) + {'method': 'deallocate_floating_ip', + 'args': {'floating_address': floating_ip['address']}}) def associate_floating_ip(self, context, floating_ip, fixed_ip, affect_auto_assigned=False): @@ -74,17 +73,17 @@ class API(base.Base): return # Check if the floating ip address is allocated if floating_ip['project_id'] is None: - raise exception.ApiError(_("Address (%s) is not allocated") % + raise exception.ApiError(_('Address (%s) is not allocated') % floating_ip['address']) # Check if the floating ip address is allocated to the same project if floating_ip['project_id'] != context.project_id: - LOG.warn(_("Address (%(address)s) is not allocated to your " - "project (%(project)s)"), + LOG.warn(_('Address (%(address)s) is not allocated to your ' + 'project (%(project)s)'), {'address': floating_ip['address'], 'project': context.project_id}) - raise exception.ApiError(_("Address (%(address)s) is not " - "allocated to your project" - "(%(project)s)") % + raise exception.ApiError(_('Address (%(address)s) is not ' + 'allocated to your project' + '(%(project)s)') % {'address': floating_ip['address'], 'project': context.project_id}) # NOTE(vish): Perhaps we should just pass this on to compute and @@ -92,9 +91,9 @@ class API(base.Base): host = fixed_ip['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), - {"method": "associate_floating_ip", - "args": {"floating_address": floating_ip['address'], - "fixed_address": fixed_ip['address']}}) + {'method': 'associate_floating_ip', + 'args': {'floating_address': floating_ip['address'], + 'fixed_address': fixed_ip['address']}}) def disassociate_floating_ip(self, context, address, affect_auto_assigned=False): @@ -108,5 +107,5 @@ class API(base.Base): host = floating_ip['fixed_ip']['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), - {"method": "disassociate_floating_ip", - "args": {"floating_address": floating_ip['address']}}) + {'method': 'disassociate_floating_ip', + 'args': {'floating_address': floating_ip['address']}}) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index b50a4b4ea..af91804a1 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -15,13 +15,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Implements vlans, bridges, and iptables rules using linux utilities. -""" +"""Implements vlans, bridges, and iptables rules using linux utilities.""" + +import calendar import inspect import os -import calendar from nova import db from nova import exception @@ -29,12 +28,13 @@ from nova import flags from nova import log as logging from nova import utils + LOG = logging.getLogger("nova.linux_net") def _bin_file(script): - """Return the absolute path to scipt in the bin directory""" - return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + """Return the absolute path to scipt in the bin directory.""" + return os.path.abspath(os.path.join(__file__, '../../../bin', script)) FLAGS = flags.FLAGS @@ -66,11 +66,13 @@ binary_name = os.path.basename(inspect.stack()[-1][1]) class IptablesRule(object): - """An iptables rule + """An iptables rule. You shouldn't need to use this class directly, it's only used by - IptablesManager + IptablesManager. 
+ """ + def __init__(self, chain, rule, wrap=True, top=False): self.chain = chain self.rule = rule @@ -95,7 +97,7 @@ class IptablesRule(object): class IptablesTable(object): - """An iptables table""" + """An iptables table.""" def __init__(self): self.rules = [] @@ -103,15 +105,16 @@ class IptablesTable(object): self.unwrapped_chains = set() def add_chain(self, name, wrap=True): - """Adds a named chain to the table + """Adds a named chain to the table. The chain name is wrapped to be unique for the component creating it, so different components of Nova can safely create identically named chains without interfering with one another. At the moment, its wrapped name is -, - so if nova-compute creates a chain named "OUTPUT", it'll actually - end up named "nova-compute-OUTPUT". + so if nova-compute creates a chain named 'OUTPUT', it'll actually + end up named 'nova-compute-OUTPUT'. + """ if wrap: self.chains.add(name) @@ -119,12 +122,13 @@ class IptablesTable(object): self.unwrapped_chains.add(name) def remove_chain(self, name, wrap=True): - """Remove named chain + """Remove named chain. This removal "cascades". All rule in the chain are removed, as are all rules in other chains that jump to it. If the chain is not found, this is merely logged. + """ if wrap: chain_set = self.chains @@ -132,7 +136,7 @@ class IptablesTable(object): chain_set = self.unwrapped_chains if name not in chain_set: - LOG.debug(_("Attempted to remove chain %s which doesn't exist"), + LOG.debug(_('Attempted to remove chain %s which does not exist'), name) return @@ -147,17 +151,18 @@ class IptablesTable(object): self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules) def add_rule(self, chain, rule, wrap=True, top=False): - """Add a rule to the table + """Add a rule to the table. This is just like what you'd feed to iptables, just without - the "-A " bit at the start. + the '-A ' bit at the start. However, if you need to jump to one of your wrapped chains, prepend its name with a '$' which will ensure the wrapping is applied correctly. + """ if wrap and chain not in self.chains: - raise ValueError(_("Unknown chain: %r") % chain) + raise ValueError(_('Unknown chain: %r') % chain) if '$' in rule: rule = ' '.join(map(self._wrap_target_chain, rule.split(' '))) @@ -170,23 +175,24 @@ class IptablesTable(object): return s def remove_rule(self, chain, rule, wrap=True, top=False): - """Remove a rule from a chain + """Remove a rule from a chain. Note: The rule must be exactly identical to the one that was added. You cannot switch arguments around like you can with the iptables CLI tool. + """ try: self.rules.remove(IptablesRule(chain, rule, wrap, top)) except ValueError: - LOG.debug(_("Tried to remove rule that wasn't there:" - " %(chain)r %(rule)r %(wrap)r %(top)r"), + LOG.debug(_('Tried to remove rule that was not there:' + ' %(chain)r %(rule)r %(wrap)r %(top)r'), {'chain': chain, 'rule': rule, 'top': top, 'wrap': wrap}) class IptablesManager(object): - """Wrapper for iptables + """Wrapper for iptables. See IptablesTable for some usage docs @@ -205,7 +211,9 @@ class IptablesManager(object): For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are wrapped in the same was as the builtin filter chains. Additionally, there's a snat chain that is applied after the POSTROUTING chain. 
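[Editor's note: a usage sketch, not part of the patch, of the wrapped-chain scheme described above. It assumes the module-level iptables_manager instance that the helper functions elsewhere in this file already rely on; 'example' is an arbitrary chain name and the rules shown are placeholders.]

from nova.network import linux_net

filter_table = linux_net.iptables_manager.ipv4['filter']
filter_table.add_chain('example')                # stored as '<component>-example'
filter_table.add_rule('FORWARD', '-j $example')  # '$' expands to the wrapped name
filter_table.add_rule('example', '-p tcp --dport 22 -j ACCEPT')
linux_net.iptables_manager.apply()               # atomic rewrite via iptables-restore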
+ """ + def __init__(self, execute=None): if not execute: self.execute = _execute @@ -267,11 +275,12 @@ class IptablesManager(object): @utils.synchronized('iptables', external=True) def apply(self): - """Apply the current in-memory set of iptables rules + """Apply the current in-memory set of iptables rules. This will blow away any rules left over from previous runs of the same component of Nova, and replace them with our current set of rules. This happens atomically, thanks to iptables-restore. + """ s = [('iptables', self.ipv4)] if FLAGS.use_ipv6: @@ -348,63 +357,63 @@ class IptablesManager(object): def metadata_forward(): - """Create forwarding rule for metadata""" - iptables_manager.ipv4['nat'].add_rule("PREROUTING", - "-s 0.0.0.0/0 -d 169.254.169.254/32 " - "-p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % \ + """Create forwarding rule for metadata.""" + iptables_manager.ipv4['nat'].add_rule('PREROUTING', + '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j DNAT ' + '--to-destination %s:%s' % \ (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) iptables_manager.apply() def init_host(): - """Basic networking setup goes here""" + """Basic networking setup goes here.""" # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. - iptables_manager.ipv4['nat'].add_rule("snat", - "-s %s -j SNAT --to-source %s" % \ + iptables_manager.ipv4['nat'].add_rule('snat', + '-s %s -j SNAT --to-source %s' % \ (FLAGS.fixed_range, FLAGS.routing_source_ip)) - iptables_manager.ipv4['nat'].add_rule("POSTROUTING", - "-s %s -d %s -j ACCEPT" % \ + iptables_manager.ipv4['nat'].add_rule('POSTROUTING', + '-s %s -d %s -j ACCEPT' % \ (FLAGS.fixed_range, FLAGS.dmz_cidr)) - iptables_manager.ipv4['nat'].add_rule("POSTROUTING", - "-s %(range)s -d %(range)s " - "-j ACCEPT" % \ + iptables_manager.ipv4['nat'].add_rule('POSTROUTING', + '-s %(range)s -d %(range)s ' + '-j ACCEPT' % \ {'range': FLAGS.fixed_range}) iptables_manager.apply() def bind_floating_ip(floating_ip, check_exit_code=True): - """Bind ip to public interface""" + """Bind ip to public interface.""" _execute('sudo', 'ip', 'addr', 'add', floating_ip, 'dev', FLAGS.public_interface, check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): - """Unbind a public ip from public interface""" + """Unbind a public ip from public interface.""" _execute('sudo', 'ip', 'addr', 'del', floating_ip, 'dev', FLAGS.public_interface) def ensure_metadata_ip(): - """Sets up local metadata ip""" + """Sets up local metadata ip.""" _execute('sudo', 'ip', 'addr', 'add', '169.254.169.254/32', 'scope', 'link', 'dev', 'lo', check_exit_code=False) def ensure_vlan_forward(public_ip, port, private_ip): - """Sets up forwarding rules for vlan""" - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "-d %s -p udp " - "--dport 1194 " - "-j ACCEPT" % private_ip) - iptables_manager.ipv4['nat'].add_rule("PREROUTING", - "-d %s -p udp " - "--dport %s -j DNAT --to %s:1194" % + """Sets up forwarding rules for vlan.""" + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '-d %s -p udp ' + '--dport 1194 ' + '-j ACCEPT' % private_ip) + iptables_manager.ipv4['nat'].add_rule('PREROUTING', + '-d %s -p udp ' + '--dport %s -j DNAT --to %s:1194' % (public_ip, port, private_ip)) iptables_manager.ipv4['nat'].add_rule("OUTPUT", "-d %s -p udp " @@ -414,37 +423,37 @@ def ensure_vlan_forward(public_ip, port, private_ip): def ensure_floating_forward(floating_ip, fixed_ip): - """Ensure floating ip forwarding rule""" + """Ensure floating ip 
forwarding rule.""" for chain, rule in floating_forward_rules(floating_ip, fixed_ip): iptables_manager.ipv4['nat'].add_rule(chain, rule) iptables_manager.apply() def remove_floating_forward(floating_ip, fixed_ip): - """Remove forwarding for floating ip""" + """Remove forwarding for floating ip.""" for chain, rule in floating_forward_rules(floating_ip, fixed_ip): iptables_manager.ipv4['nat'].remove_rule(chain, rule) iptables_manager.apply() def floating_forward_rules(floating_ip, fixed_ip): - return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)), - ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)), - ("floating-snat", - "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))] + return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)), + ('floating-snat', + '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))] def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): - """Create a vlan and bridge unless they already exist""" + """Create a vlan and bridge unless they already exist.""" interface = ensure_vlan(vlan_num) ensure_bridge(bridge, interface, net_attrs) def ensure_vlan(vlan_num): - """Create a vlan unless it already exists""" - interface = "vlan%s" % vlan_num + """Create a vlan unless it already exists.""" + interface = 'vlan%s' % vlan_num if not _device_exists(interface): - LOG.debug(_("Starting VLAN inteface %s"), interface) + LOG.debug(_('Starting VLAN inteface %s'), interface) _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num) _execute('sudo', 'ip', 'link', 'set', interface, 'up') @@ -464,12 +473,13 @@ def ensure_bridge(bridge, interface, net_attrs=None): The code will attempt to move any ips that already exist on the interface onto the bridge and reset the default gateway if necessary. 
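[Editor's note: an illustrative call, not part of the patch, showing how ensure_vlan_bridge() ties ensure_vlan() and ensure_bridge() together. The addresses are made up; only the net_attrs keys read by the ipv4 path in this hunk are shown, and cidr_v6/gateway_v6 would also be required when ipv6 support is enabled. These helpers shell out via sudo.]

from nova.network import linux_net

# Creates interface 'vlan100' if it is missing, enslaves it to bridge
# 'br100', and moves the gateway address onto the bridge so it can
# answer requests on that network.
linux_net.ensure_vlan_bridge(100, 'br100',
                             {'cidr': '10.0.100.0/24',
                              'gateway': '10.0.100.1',
                              'broadcast': '10.0.100.255'})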
+ """ if not _device_exists(bridge): - LOG.debug(_("Starting Bridge interface for %s"), interface) + LOG.debug(_('Starting Bridge interface for %s'), interface) _execute('sudo', 'brctl', 'addbr', bridge) _execute('sudo', 'brctl', 'setfd', bridge, 0) - # _execute("sudo brctl setageing %s 10" % bridge) + # _execute('sudo brctl setageing %s 10' % bridge) _execute('sudo', 'brctl', 'stp', bridge, 'off') _execute('sudo', 'ip', 'link', 'set', bridge, 'up') if net_attrs: @@ -477,15 +487,15 @@ def ensure_bridge(bridge, interface, net_attrs=None): # bridge for it to respond to reqests properly suffix = net_attrs['cidr'].rpartition('/')[2] out, err = _execute('sudo', 'ip', 'addr', 'add', - "%s/%s" % + '%s/%s' % (net_attrs['gateway'], suffix), 'brd', net_attrs['broadcast'], 'dev', bridge, check_exit_code=False) - if err and err != "RTNETLINK answers: File exists\n": - raise exception.Error("Failed to add ip: %s" % err) + if err and err != 'RTNETLINK answers: File exists\n': + raise exception.Error('Failed to add ip: %s' % err) if(FLAGS.use_ipv6): _execute('sudo', 'ip', '-f', 'inet6', 'addr', 'change', net_attrs['cidr_v6'], @@ -501,17 +511,17 @@ def ensure_bridge(bridge, interface, net_attrs=None): # interface, so we move any ips to the bridge gateway = None out, err = _execute('sudo', 'route', '-n') - for line in out.split("\n"): + for line in out.split('\n'): fields = line.split() - if fields and fields[0] == "0.0.0.0" and fields[-1] == interface: + if fields and fields[0] == '0.0.0.0' and fields[-1] == interface: gateway = fields[1] _execute('sudo', 'route', 'del', 'default', 'gw', gateway, 'dev', interface, check_exit_code=False) out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, 'scope', 'global') - for line in out.split("\n"): + for line in out.split('\n'): fields = line.split() - if fields and fields[0] == "inet": + if fields and fields[0] == 'inet': params = fields[1:-1] _execute(*_ip_bridge_cmd('del', params, fields[-1])) _execute(*_ip_bridge_cmd('add', params, bridge)) @@ -522,18 +532,18 @@ def ensure_bridge(bridge, interface, net_attrs=None): if (err and err != "device %s is already a member of a bridge; can't " "enslave it to bridge %s.\n" % (interface, bridge)): - raise exception.Error("Failed to add interface: %s" % err) + raise exception.Error('Failed to add interface: %s' % err) - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "--in-interface %s -j ACCEPT" % \ + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--in-interface %s -j ACCEPT' % \ bridge) - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "--out-interface %s -j ACCEPT" % \ + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--out-interface %s -j ACCEPT' % \ bridge) def get_dhcp_leases(context, network_id): - """Return a network's hosts config in dnsmasq leasefile format""" + """Return a network's hosts config in dnsmasq leasefile format.""" hosts = [] for fixed_ip_ref in db.network_get_associated_fixed_ips(context, network_id): @@ -542,7 +552,7 @@ def get_dhcp_leases(context, network_id): def get_dhcp_hosts(context, network_id): - """Get a string containing a network's hosts config in dhcp-host format""" + """Get network's hosts config in dhcp-host format.""" hosts = [] for fixed_ip_ref in db.network_get_associated_fixed_ips(context, network_id): @@ -555,10 +565,11 @@ def get_dhcp_hosts(context, network_id): # aren't reloaded. 
@utils.synchronized('dnsmasq_start') def update_dhcp(context, network_id): - """(Re)starts a dnsmasq server for a given network + """(Re)starts a dnsmasq server for a given network. + + If a dnsmasq instance is already running then send a HUP + signal causing it to reload, otherwise spawn a new instance. - if a dnsmasq instance is already running then send a HUP - signal causing it to reload, otherwise spawn a new instance """ network_ref = db.network_get(context, network_id) @@ -573,16 +584,16 @@ def update_dhcp(context, network_id): # if dnsmasq is already running, then tell it to reload if pid: - out, _err = _execute('cat', "/proc/%d/cmdline" % pid, + out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: _execute('sudo', 'kill', '-HUP', pid) return except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("Hupping dnsmasq threw %s"), exc) + LOG.debug(_('Hupping dnsmasq threw %s'), exc) else: - LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) + LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, @@ -625,18 +636,18 @@ interface %s try: _execute('sudo', 'kill', pid) except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("killing radvd threw %s"), exc) + LOG.debug(_('killing radvd threw %s'), exc) else: - LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) + LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) command = _ra_cmd(network_ref) _execute(*command) db.network_update(context, network_id, - {"gateway_v6": + {'gateway_v6': utils.get_my_linklocal(network_ref['bridge'])}) def _host_lease(fixed_ip_ref): - """Return a host string for an address in leasefile format""" + """Return a host string for an address in leasefile format.""" instance_ref = fixed_ip_ref['instance'] if instance_ref['updated_at']: timestamp = instance_ref['updated_at'] @@ -645,39 +656,39 @@ def _host_lease(fixed_ip_ref): seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) - return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time, + return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time, instance_ref['mac_address'], fixed_ip_ref['address'], instance_ref['hostname'] or '*') def _host_dhcp(fixed_ip_ref): - """Return a host string for an address in dhcp-host format""" + """Return a host string for an address in dhcp-host format.""" instance_ref = fixed_ip_ref['instance'] - return "%s,%s.%s,%s" % (instance_ref['mac_address'], + return '%s,%s.%s,%s' % (instance_ref['mac_address'], instance_ref['hostname'], FLAGS.dhcp_domain, fixed_ip_ref['address']) def _execute(*cmd, **kwargs): - """Wrapper around utils._execute for fake_network""" + """Wrapper around utils._execute for fake_network.""" if FLAGS.fake_network: - LOG.debug("FAKE NET: %s", " ".join(map(str, cmd))) - return "fake", 0 + LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd))) + return 'fake', 0 else: return utils.execute(*cmd, **kwargs) def _device_exists(device): - """Check if ethernet device exists""" + """Check if ethernet device exists.""" (_out, err) = _execute('ip', 'link', 'show', 'dev', device, check_exit_code=False) return not err def _dnsmasq_cmd(net): - """Builds dnsmasq command""" + """Builds dnsmasq command.""" cmd = ['sudo', '-E', 'dnsmasq', '--strict-order', '--bind-interfaces', @@ -696,7 +707,7 @@ def _dnsmasq_cmd(net): def _ra_cmd(net): - """Builds radvd command""" + """Builds radvd command.""" cmd = ['sudo', '-E', 'radvd', # '-u', 'nobody', 
'-C', '%s' % _ra_file(net['bridge'], 'conf'), @@ -705,44 +716,44 @@ def _ra_cmd(net): def _stop_dnsmasq(network): - """Stops the dnsmasq instance for a given network""" + """Stops the dnsmasq instance for a given network.""" pid = _dnsmasq_pid_for(network) if pid: try: _execute('sudo', 'kill', '-TERM', pid) except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("Killing dnsmasq threw %s"), exc) + LOG.debug(_('Killing dnsmasq threw %s'), exc) def _dhcp_file(bridge, kind): - """Return path to a pid, leases or conf file for a bridge""" + """Return path to a pid, leases or conf file for a bridge.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) - return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, + return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path, bridge, kind)) def _ra_file(bridge, kind): - """Return path to a pid or conf file for a bridge""" + """Return path to a pid or conf file for a bridge.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) - return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path, + return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path, bridge, kind)) def _dnsmasq_pid_for(bridge): - """Returns the pid for prior dnsmasq instance for a bridge + """Returns the pid for prior dnsmasq instance for a bridge. - Returns None if no pid file exists + Returns None if no pid file exists. - If machine has rebooted pid might be incorrect (caller should check) - """ + If machine has rebooted pid might be incorrect (caller should check). + """ pid_file = _dhcp_file(bridge, 'pid') if os.path.exists(pid_file): @@ -751,13 +762,13 @@ def _dnsmasq_pid_for(bridge): def _ra_pid_for(bridge): - """Returns the pid for prior radvd instance for a bridge + """Returns the pid for prior radvd instance for a bridge. - Returns None if no pid file exists + Returns None if no pid file exists. - If machine has rebooted pid might be incorrect (caller should check) - """ + If machine has rebooted pid might be incorrect (caller should check). + """ pid_file = _ra_file(bridge, 'pid') if os.path.exists(pid_file): @@ -766,7 +777,7 @@ def _ra_pid_for(bridge): def _ip_bridge_cmd(action, params, device): - """Build commands to add/del ips to bridges/devices""" + """Build commands to add/del ips to bridges/devices.""" cmd = ['sudo', 'ip', 'addr', action] cmd.extend(params) diff --git a/nova/network/manager.py b/nova/network/manager.py index 0dd7f2360..5a6fdde5a 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -16,8 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Network Hosts are responsible for allocating ips and setting up network. +"""Network Hosts are responsible for allocating ips and setting up network. There are multiple backend drivers that handle specific types of networking topologies. All of the network commands are issued to a subclass of @@ -61,6 +60,8 @@ from nova import rpc LOG = logging.getLogger("nova.network.manager") + + FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') @@ -111,7 +112,9 @@ class NetworkManager(manager.SchedulerDependentManager): """Implements common network manager functionality. This class must be subclassed to support specific topologies. 
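[Editor's note: a hypothetical subclass sketch illustrating the extension point described above; it uses only members visible in this hunk and is not one of the real topology managers.]

class ExampleTopologyManager(NetworkManager):
    """Do-nothing manager used purely to illustrate subclassing."""

    # Subclasses may opt out of reclaiming stale fixed ips.
    timeout_fixed_ips = False

    def init_host(self):
        # Run the common standalone-service setup first, then add any
        # topology-specific host wiring here.
        super(ExampleTopologyManager, self).init_host()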
+ """ + timeout_fixed_ips = True def __init__(self, network_driver=None, *args, **kwargs): @@ -122,9 +125,7 @@ class NetworkManager(manager.SchedulerDependentManager): *args, **kwargs) def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" self.driver.init_host() self.driver.ensure_metadata_ip() # Set up networking for the projects for which we're already @@ -154,11 +155,11 @@ class NetworkManager(manager.SchedulerDependentManager): self.host, time) if num: - LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) + LOG.debug(_('Dissassociated %s stale fixed ip(s)'), num) def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - LOG.debug(_("setting network host"), context=context) + LOG.debug(_('setting network host'), context=context) host = self.db.network_set_host(context, network_id, self.host) @@ -224,39 +225,39 @@ class NetworkManager(manager.SchedulerDependentManager): def lease_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is leased.""" - LOG.debug(_("Leasing IP %s"), address, context=context) + LOG.debug(_('Leasing IP %s'), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error(_("IP %s leased that isn't associated") % + raise exception.Error(_('IP %s leased that is not associated') % address) if instance_ref['mac_address'] != mac: inst_addr = instance_ref['mac_address'] - raise exception.Error(_("IP %(address)s leased to bad" - " mac %(inst_addr)s vs %(mac)s") % locals()) + raise exception.Error(_('IP %(address)s leased to bad mac' + ' %(inst_addr)s vs %(mac)s') % locals()) now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - LOG.warn(_("IP %s leased that was already deallocated"), address, + LOG.warn(_('IP %s leased that was already deallocated'), address, context=context) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" - LOG.debug(_("Releasing IP %s"), address, context=context) + LOG.debug(_('Releasing IP %s'), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error(_("IP %s released that isn't associated") % + raise exception.Error(_('IP %s released that is not associated') % address) if instance_ref['mac_address'] != mac: inst_addr = instance_ref['mac_address'] - raise exception.Error(_("IP %(address)s released from" - " bad mac %(inst_addr)s vs %(mac)s") % locals()) + raise exception.Error(_('IP %(address)s released from bad mac' + ' %(inst_addr)s vs %(mac)s') % locals()) if not fixed_ip_ref['leased']: - LOG.warn(_("IP %s released that was not leased"), address, + LOG.warn(_('IP %s released that was not leased'), address, context=context) self.db.fixed_ip_update(context, fixed_ip_ref['address'], @@ -286,8 +287,8 @@ class NetworkManager(manager.SchedulerDependentManager): return self.set_network_host(context, network_ref['id']) host = rpc.call(context, FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) + {'method': 'set_network_host', + 'args': {'network_id': network_ref['id']}}) return host def create_networks(self, context, cidr, num_networks, network_size, 
@@ -302,7 +303,7 @@ class NetworkManager(manager.SchedulerDependentManager): start = index * network_size start_v6 = index * network_size_v6 significant_bits = 32 - int(math.log(network_size, 2)) - cidr = "%s/%s" % (fixed_net[start], significant_bits) + cidr = '%s/%s' % (fixed_net[start], significant_bits) project_net = IPy.IP(cidr) net = {} net['bridge'] = FLAGS.flat_network_bridge @@ -313,13 +314,13 @@ class NetworkManager(manager.SchedulerDependentManager): net['broadcast'] = str(project_net.broadcast()) net['dhcp_start'] = str(project_net[2]) if num_networks > 1: - net['label'] = "%s_%d" % (label, count) + net['label'] = '%s_%d' % (label, count) else: net['label'] = label count += 1 if(FLAGS.use_ipv6): - cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6], + cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 project_net_v6 = IPy.IP(cidr_v6) @@ -386,13 +387,13 @@ class FlatManager(NetworkManager): Metadata forwarding must be handled by the gateway, and since nova does not do any setup in this mode, it must be done manually. Requests to 169.254.169.254 port 80 will need to be forwarded to the api server. + """ + timeout_fixed_ips = False def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" #Fix for bug 723298 - do not call init_host on superclass #Following code has been copied for NetworkManager.init_host ctxt = context.get_admin_context() @@ -433,12 +434,11 @@ class FlatDHCPManager(NetworkManager): FlatDHCPManager will start up one dhcp server to give out addresses. It never injects network settings into the guest. Otherwise it behaves like FlatDHCPManager. + """ def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" super(FlatDHCPManager, self).init_host() self.driver.metadata_forward() @@ -490,12 +490,11 @@ class VlanManager(NetworkManager): A dhcp server is run for each subnet, so each project will have its own. For this mode to be useful, each project will need a vpn to access the instances in its subnet. + """ def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" super(VlanManager, self).init_host() self.driver.metadata_forward() @@ -566,7 +565,7 @@ class VlanManager(NetworkManager): net['vlan'] = vlan net['bridge'] = 'br%s' % vlan if(FLAGS.use_ipv6): - cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6], + cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 @@ -600,8 +599,8 @@ class VlanManager(NetworkManager): return self.set_network_host(context, network_ref['id']) host = rpc.call(context, FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) + {'method': 'set_network_host', + 'args': {'network_id': network_ref['id']}}) return host diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py index 9b2db7b8f..bf1070995 100644 --- a/nova/network/vmwareapi_net.py +++ b/nova/network/vmwareapi_net.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Implements vlans for vmwareapi. 
-""" +"""Implements vlans for vmwareapi.""" from nova import db from nova import exception @@ -27,8 +25,10 @@ from nova import utils from nova.virt.vmwareapi_conn import VMWareAPISession from nova.virt.vmwareapi import network_utils + LOG = logging.getLogger("nova.network.vmwareapi_net") + FLAGS = flags.FLAGS flags.DEFINE_string('vlan_interface', 'vmnic0', 'Physical network adapter name in VMware ESX host for ' @@ -42,10 +42,10 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): host_username = FLAGS.vmwareapi_host_username host_password = FLAGS.vmwareapi_host_password if not host_ip or host_username is None or host_password is None: - raise Exception(_("Must specify vmwareapi_host_ip," - "vmwareapi_host_username " - "and vmwareapi_host_password to use" - "connection_type=vmwareapi")) + raise Exception(_('Must specify vmwareapi_host_ip,' + 'vmwareapi_host_username ' + 'and vmwareapi_host_password to use' + 'connection_type=vmwareapi')) session = VMWareAPISession(host_ip, host_username, host_password, FLAGS.vmwareapi_api_retry_count) vlan_interface = FLAGS.vlan_interface diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py index 8c22a7d4b..709ef7f34 100644 --- a/nova/network/xenapi_net.py +++ b/nova/network/xenapi_net.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Implements vlans, bridges, and iptables rules using linux utilities. -""" +"""Implements vlans, bridges, and iptables rules using linux utilities.""" import os @@ -26,22 +24,24 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils -from nova.virt.xenapi_conn import XenAPISession +from nova.virt import xenapi_conn from nova.virt.xenapi import network_utils + LOG = logging.getLogger("nova.xenapi_net") + FLAGS = flags.FLAGS def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): """Create a vlan and bridge unless they already exist.""" # Open xenapi session - LOG.debug("ENTERING ensure_vlan_bridge in xenapi net") + LOG.debug('ENTERING ensure_vlan_bridge in xenapi net') url = FLAGS.xenapi_connection_url username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password - session = XenAPISession(url, username, password) + session = xenapi_conn.XenAPISession(url, username, password) # Check whether bridge already exists # Retrieve network whose name_label is "bridge" network_ref = network_utils.NetworkHelper.find_network_with_name_label( @@ -50,14 +50,14 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): if network_ref is None: # If bridge does not exists # 1 - create network - description = "network for nova bridge %s" % bridge + description = 'network for nova bridge %s' % bridge network_rec = {'name_label': bridge, 'name_description': description, 'other_config': {}} network_ref = session.call_xenapi('network.create', network_rec) # 2 - find PIF for VLAN - expr = 'field "device" = "%s" and \ - field "VLAN" = "-1"' % FLAGS.vlan_interface + expr = "field 'device' = '%s' and \ + field 'VLAN' = '-1'" % FLAGS.vlan_interface pifs = session.call_xenapi('PIF.get_all_records_where', expr) pif_ref = None # Multiple PIF are ok: we are dealing with a pool -- cgit From 17e06aa079b2961b7d6ba23f8032d003a2bf8b6a Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 11 May 2011 14:24:01 -0500 Subject: changes per review --- nova/network/linux_net.py | 2 -- nova/network/vmwareapi_net.py | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'nova') diff 
--git a/nova/network/linux_net.py b/nova/network/linux_net.py index af91804a1..85c4c278c 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -728,7 +728,6 @@ def _stop_dnsmasq(network): def _dhcp_file(bridge, kind): """Return path to a pid, leases or conf file for a bridge.""" - if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path, @@ -778,7 +777,6 @@ def _ra_pid_for(bridge): def _ip_bridge_cmd(action, params, device): """Build commands to add/del ips to bridges/devices.""" - cmd = ['sudo', 'ip', 'addr', action] cmd.extend(params) cmd.extend(['dev', device]) diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py index bf1070995..373060add 100644 --- a/nova/network/vmwareapi_net.py +++ b/nova/network/vmwareapi_net.py @@ -42,9 +42,9 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): host_username = FLAGS.vmwareapi_host_username host_password = FLAGS.vmwareapi_host_password if not host_ip or host_username is None or host_password is None: - raise Exception(_('Must specify vmwareapi_host_ip,' + raise Exception(_('Must specify vmwareapi_host_ip, ' 'vmwareapi_host_username ' - 'and vmwareapi_host_password to use' + 'and vmwareapi_host_password to use ' 'connection_type=vmwareapi')) session = VMWareAPISession(host_ip, host_username, host_password, FLAGS.vmwareapi_api_retry_count) -- cgit From 6de6da879c37f0a5983f4c72692db84c3dd10b22 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 11 May 2011 14:41:31 -0500 Subject: Redundant line --- nova/tests/test_compute.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 1b0e66bef..136d7a915 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -335,7 +335,6 @@ class ComputeTestCase(test.TestCase): def fake(*args, **kwargs): pass - self.stubs.Set(self.compute.driver, 'finish_resize', fake) self.stubs.Set(self.compute.driver, 'finish_resize', fake) context = self.context.elevated() instance_id = self._create_instance() -- cgit From fd8b9eb204b77da583f1aee4022920367730823f Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Wed, 11 May 2011 17:06:56 -0700 Subject: Fix remote volume code --- nova/virt/xenapi/volume_utils.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 55c11a4ad..7821a4f7e 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -204,13 +204,16 @@ def _get_volume_id(path_or_id): if isinstance(path_or_id, int): return path_or_id # n must contain at least the volume_id - # /vol- is for remote volumes - # -vol- is for local volumes + # :volume- is for remote volumes + # -volume- is for local volumes # see compute/manager->setup_compute_volume - volume_id = path_or_id[path_or_id.find('/vol-') + 1:] + volume_id = path_or_id[path_or_id.find(':volume-') + 1:] if volume_id == path_or_id: volume_id = path_or_id[path_or_id.find('-volume--') + 1:] volume_id = volume_id.replace('volume--', '') + else: + volume_id = volume_id.replace('volume-', '') + volume_id = volume_id[0:volume_id.find('-')] return int(volume_id) -- cgit From ad3f578a37001957361014c7400dbe2e8ddd0baf Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Thu, 12 May 2011 17:44:07 +0400 Subject: Added network_info into refresh_security_group_rules --- nova/tests/test_virt.py | 12 +++++++++++- 
nova/virt/libvirt_conn.py | 20 ++++++++++++++------ 2 files changed, 25 insertions(+), 7 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 1311ba361..874c4693f 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -849,7 +849,7 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(len(rulesv4), 2) self.assertEquals(len(rulesv6), 0) - def multinic_iptables_test(self): + def test_multinic_iptables(self): ipv4_rules_per_network = 2 ipv6_rules_per_network = 3 networks_count = 5 @@ -869,6 +869,16 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(ipv6_network_rules, ipv6_rules_per_network * networks_count) + def test_do_refresh_security_group_rules(self): + instance_ref = self._create_instance_ref() + self.mox.StubOutWithMock(self.fw, + 'add_filters_for_instance', + use_mock_anything=True) + self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg()) + self.fw.instances[instance_ref['id']] = instance_ref + self.mox.ReplayAll() + self.fw.do_refresh_security_group_rules("fake") + class NWFilterTestCase(test.TestCase): def setUp(self): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 555e44ce2..1e0a25a17 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1612,7 +1612,9 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group_rules(self, security_group_id): + def refresh_security_group_rules(self, + security_group_id, + network_info=None): """Refresh security group rules from data store Gets called when a rule has been added to or removed from @@ -1911,7 +1913,9 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self._filter_container(filter_name, filter_children)) - def refresh_security_group_rules(self, security_group_id): + def refresh_security_group_rules(self, + security_group_id, + network_info=None): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -2169,15 +2173,19 @@ class IptablesFirewallDriver(FirewallDriver): def refresh_security_group_members(self, security_group): pass - def refresh_security_group_rules(self, security_group): - self.do_refresh_security_group_rules(security_group) + def refresh_security_group_rules(self, security_group, network_info=None): + self.do_refresh_security_group_rules(security_group, network_info) self.iptables.apply() @utils.synchronized('iptables', external=True) - def do_refresh_security_group_rules(self, security_group): + def do_refresh_security_group_rules(self, + security_group, + network_info=None): for instance in self.instances.values(): self.remove_filters_for_instance(instance) - self.add_filters_for_instance(instance) + if not network_info: + network_info = _get_network_info(instance) + self.add_filters_for_instance(instance, network_info) def _security_group_chain_name(self, security_group_id): return 'nova-sg-%s' % (security_group_id,) -- cgit From 22c33d80ce040f09c9bcd7584cf1165cf769e192 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 12 May 2011 10:55:04 -0400 Subject: Initial work on request extensions. 
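The patch below adds a request extension hook to the extensions framework: a RequestExtension names an HTTP method and a URL route, and its handler is called with both the incoming request and the response the core controller produced. A minimal sketch of what an extension registers, using the flavors route and the googoose field from this series' test fixtures (illustrative names only); note that in this first patch a handler may return a plain dict for the controller to serialize, while a later patch in the series switches handlers to returning the webob response directly:

    import json

    from nova.api.openstack import extensions


    def _googoose_handler(req, res):
        # Runs after the core flavors controller: read a query parameter
        # from the request and fold it into the JSON response body.
        data = json.loads(res.body)
        data['flavor']['googoose'] = req.GET.get('test_param')
        return data


    req_ext = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)',
                                          _googoose_handler)
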
--- nova/api/openstack/extensions.py | 78 +++++++++++++++++++++++ nova/tests/api/openstack/extensions/foxinsocks.py | 3 + nova/tests/api/openstack/test_extensions.py | 47 +++++++++++++- 3 files changed, 127 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 7ea7afef6..e6dd228ec 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -165,6 +165,34 @@ class ResponseExtensionController(common.OpenstackController): return res +class RequestExtensionController(common.OpenstackController): + + def __init__(self, application): + self.application = application + self.handlers = [] + + def add_handler(self, handler): + self.handlers.append(handler) + + def process(self, req, *args, **kwargs): + res = req.get_response(self.application) + content_type = req.best_match_content_type() + # currently response handlers are un-ordered + for handler in self.handlers: + res = handler(req, res) + try: + body = res.body + headers = res.headers + except AttributeError: + default_xmlns = None + body = self._serialize(res, content_type, default_xmlns) + headers = {"Content-Type": content_type} + res = webob.Response() + res.body = body + res.headers = headers + return res + + class ExtensionController(common.OpenstackController): def __init__(self, extension_manager): @@ -245,6 +273,25 @@ class ExtensionMiddleware(wsgi.Middleware): return response_ext_controllers + def _request_ext_controllers(self, application, ext_mgr, mapper): + """Returns a dict of RequestExtensionController-s by collection.""" + request_ext_controllers = {} + for req_ext in ext_mgr.get_request_extensions(): + if not req_ext.key in request_ext_controllers.keys(): + controller = RequestExtensionController(application) + mapper.connect(req_ext.url_route + '.:(format)', + action='process', + controller=controller, + conditions=req_ext.conditions) + + mapper.connect(req_ext.url_route, + action='process', + controller=controller, + conditions=req_ext.conditions) + request_ext_controllers[req_ext.key] = controller + + return request_ext_controllers + def __init__(self, application, ext_mgr=None): if ext_mgr is None: @@ -279,6 +326,14 @@ class ExtensionMiddleware(wsgi.Middleware): controller = resp_controllers[response_ext.key] controller.add_handler(response_ext.handler) + # extended requests + req_controllers = self._request_ext_controllers(application, ext_mgr, + mapper) + for request_ext in ext_mgr.get_request_extensions(): + LOG.debug(_('Extended request: %s'), request_ext.key) + controller = req_controllers[request_ext.key] + controller.add_handler(request_ext.handler) + self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper) @@ -359,6 +414,18 @@ class ExtensionManager(object): pass return response_exts + def get_request_extensions(self): + """Returns a list of RequestExtension objects.""" + request_exts = [] + for alias, ext in self.extensions.iteritems(): + try: + request_exts.extend(ext.get_request_extensions()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have request + # extensions + pass + return request_exts + def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: @@ -431,6 +498,17 @@ class ResponseExtension(object): self.key = "%s-%s" % (method, url_route) +class RequestExtension(object): + """Provide a way to handle custom request data that is sent to core + nova OpenStack API controllers. 
+ """ + def __init__(self, method, url_route, handler): + self.url_route = url_route + self.handler = handler + self.conditions = dict(method=[method]) + self.key = "%s-%s" % (method, url_route) + + class ActionExtension(object): """Add custom actions to core nova OpenStack API controllers.""" diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index 0860b51ac..7699ffb56 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -89,6 +89,9 @@ class Foxinsocks(object): response_exts.append(resp_ext2) return response_exts + def get_request_extensions(self): + return [] + def _add_tweedle(self, input_dict, req, id): return "Tweedle Beetle Added." diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 481d34ed1..7fadb5b69 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -45,10 +45,12 @@ class StubController(nova.wsgi.Controller): class StubExtensionManager(object): - def __init__(self, resource_ext=None, action_ext=None, response_ext=None): + def __init__(self, resource_ext=None, action_ext=None, response_ext=None, + request_ext=None): self.resource_ext = resource_ext self.action_ext = action_ext self.response_ext = response_ext + self.request_ext = request_ext def get_name(self): return "Tweedle Beetle Extension" @@ -77,6 +79,12 @@ class StubExtensionManager(object): response_exts.append(self.response_ext) return response_exts + def get_request_extensions(self): + request_extensions = [] + if self.request_ext: + request_extensions.append(self.request_ext) + return request_extensions + class ExtensionControllerTest(unittest.TestCase): @@ -234,3 +242,40 @@ class ResponseExtensionTest(unittest.TestCase): response_data = json.loads(response.body) self.assertEqual(test_resp, response_data['flavor']['googoose']) self.assertEqual("Pig Bands!", response_data['big_bands']) + + +class RequestExtensionTest(unittest.TestCase): + + def setUp(self): + super(RequestExtensionTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_auth(self.stubs) + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(RequestExtensionTest, self).tearDown() + + def test_post_request_extension_with_stub_mgr(self): + + def _req_handler(req, res): + # only handle JSON responses + data = json.loads(res.body) + data['flavor']['googoose'] = req.GET.get('test_param') + return data + + resp_ext = extensions.RequestExtension('GET', + '/v1.1/flavors/:(id)', + _req_handler) + + manager = StubExtensionManager(None, None, None, resp_ext) + app = fakes.wsgi_app() + ext_midware = extensions.ExtensionMiddleware(app, manager) + request = webob.Request.blank("/v1.1/flavors/1?test_param=foo") + request.environ['api.version'] = '1.1' + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + response_data = json.loads(response.body) + self.assertEqual('foo', response_data['flavor']['googoose']) -- cgit From ce2b13d9fb30c0afbcff97f434d7423cad39b8b9 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 12 May 2011 12:52:32 -0400 Subject: Remove ResponseExtensions. The new RequestExtension covers both use cases. 
--- nova/api/openstack/extensions.py | 88 ++--------------------- nova/tests/api/openstack/extensions/foxinsocks.py | 15 ++-- nova/tests/api/openstack/test_extensions.py | 75 ++++--------------- 3 files changed, 24 insertions(+), 154 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index e6dd228ec..3a7763463 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -105,15 +105,14 @@ class ExtensionDescriptor(object): actions = [] return actions - def get_response_extensions(self): - """List of extensions.ResponseExtension extension objects. + def get_request_extensions(self): + """List of extensions.RequestException extension objects. - Response extensions are used to insert information into existing - response data. + Request extensions are used to handle custom request data. """ - response_exts = [] - return response_exts + request_exts = [] + return request_exts class ActionExtensionController(common.OpenstackController): @@ -137,34 +136,6 @@ class ActionExtensionController(common.OpenstackController): return res -class ResponseExtensionController(common.OpenstackController): - - def __init__(self, application): - self.application = application - self.handlers = [] - - def add_handler(self, handler): - self.handlers.append(handler) - - def process(self, req, *args, **kwargs): - res = req.get_response(self.application) - content_type = req.best_match_content_type() - # currently response handlers are un-ordered - for handler in self.handlers: - res = handler(res) - try: - body = res.body - headers = res.headers - except AttributeError: - default_xmlns = None - body = self._serialize(res, content_type, default_xmlns) - headers = {"Content-Type": content_type} - res = webob.Response() - res.body = body - res.headers = headers - return res - - class RequestExtensionController(common.OpenstackController): def __init__(self, application): @@ -254,25 +225,6 @@ class ExtensionMiddleware(wsgi.Middleware): return action_controllers - def _response_ext_controllers(self, application, ext_mgr, mapper): - """Returns a dict of ResponseExtensionController-s by collection.""" - response_ext_controllers = {} - for resp_ext in ext_mgr.get_response_extensions(): - if not resp_ext.key in response_ext_controllers.keys(): - controller = ResponseExtensionController(application) - mapper.connect(resp_ext.url_route + '.:(format)', - action='process', - controller=controller, - conditions=resp_ext.conditions) - - mapper.connect(resp_ext.url_route, - action='process', - controller=controller, - conditions=resp_ext.conditions) - response_ext_controllers[resp_ext.key] = controller - - return response_ext_controllers - def _request_ext_controllers(self, application, ext_mgr, mapper): """Returns a dict of RequestExtensionController-s by collection.""" request_ext_controllers = {} @@ -318,14 +270,6 @@ class ExtensionMiddleware(wsgi.Middleware): controller = action_controllers[action.collection] controller.add_action(action.action_name, action.handler) - # extended responses - resp_controllers = self._response_ext_controllers(application, ext_mgr, - mapper) - for response_ext in ext_mgr.get_response_extensions(): - LOG.debug(_('Extended response: %s'), response_ext.key) - controller = resp_controllers[response_ext.key] - controller.add_handler(response_ext.handler) - # extended requests req_controllers = self._request_ext_controllers(application, ext_mgr, mapper) @@ -402,18 +346,6 @@ class ExtensionManager(object): pass 
return actions - def get_response_extensions(self): - """Returns a list of ResponseExtension objects.""" - response_exts = [] - for alias, ext in self.extensions.iteritems(): - try: - response_exts.extend(ext.get_response_extensions()) - except AttributeError: - # NOTE(dprince): Extension aren't required to have response - # extensions - pass - return response_exts - def get_request_extensions(self): """Returns a list of RequestExtension objects.""" request_exts = [] @@ -488,16 +420,6 @@ class ExtensionManager(object): self.extensions[alias] = ext -class ResponseExtension(object): - """Add data to responses from core nova OpenStack API controllers.""" - - def __init__(self, method, url_route, handler): - self.url_route = url_route - self.handler = handler - self.conditions = dict(method=[method]) - self.key = "%s-%s" % (method, url_route) - - class RequestExtension(object): """Provide a way to handle custom request data that is sent to core nova OpenStack API controllers. diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index 7699ffb56..b3f30c4e5 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -63,35 +63,32 @@ class Foxinsocks(object): self._delete_tweedle)) return actions - def get_response_extensions(self): + def get_request_extensions(self): response_exts = [] - def _goose_handler(res): + def _goose_handler(req, res): #NOTE: This only handles JSON responses. # You can use content type header to test for XML. data = json.loads(res.body) - data['flavor']['googoose'] = "Gooey goo for chewy chewing!" + data['flavor']['googoose'] = req.GET.get('chewing') return data - resp_ext = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)', + resp_ext = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _goose_handler) response_exts.append(resp_ext) - def _bands_handler(res): + def _bands_handler(req, res): #NOTE: This only handles JSON responses. # You can use content type header to test for XML. data = json.loads(res.body) data['big_bands'] = 'Pig Bands!' return data - resp_ext2 = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)', + resp_ext2 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _bands_handler) response_exts.append(resp_ext2) return response_exts - def get_request_extensions(self): - return [] - def _add_tweedle(self, input_dict, req, id): return "Tweedle Beetle Added." 
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 7fadb5b69..c63474dea 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -45,11 +45,9 @@ class StubController(nova.wsgi.Controller): class StubExtensionManager(object): - def __init__(self, resource_ext=None, action_ext=None, response_ext=None, - request_ext=None): + def __init__(self, resource_ext=None, action_ext=None, request_ext=None): self.resource_ext = resource_ext self.action_ext = action_ext - self.response_ext = response_ext self.request_ext = request_ext def get_name(self): @@ -73,12 +71,6 @@ class StubExtensionManager(object): action_exts.append(self.action_ext) return action_exts - def get_response_extensions(self): - response_exts = [] - if self.response_ext: - response_exts.append(self.response_ext) - return response_exts - def get_request_extensions(self): request_extensions = [] if self.request_ext: @@ -191,10 +183,10 @@ class ActionExtensionTest(unittest.TestCase): self.assertEqual(404, response.status_int) -class ResponseExtensionTest(unittest.TestCase): +class RequestExtensionTest(unittest.TestCase): def setUp(self): - super(ResponseExtensionTest, self).setUp() + super(RequestExtensionTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} @@ -203,79 +195,38 @@ class ResponseExtensionTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() - super(ResponseExtensionTest, self).tearDown() + super(RequestExtensionTest, self).tearDown() def test_get_resources_with_stub_mgr(self): - test_resp = "Gooey goo for chewy chewing!" - - def _resp_handler(res): + def _req_handler(req, res): # only handle JSON responses data = json.loads(res.body) - data['flavor']['googoose'] = test_resp + data['flavor']['googoose'] = req.GET.get('chewing') return data - resp_ext = extensions.ResponseExtension('GET', + req_ext = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', - _resp_handler) + _req_handler) - manager = StubExtensionManager(None, None, resp_ext) + manager = StubExtensionManager(None, None, req_ext) app = fakes.wsgi_app() ext_midware = extensions.ExtensionMiddleware(app, manager) - request = webob.Request.blank("/v1.1/flavors/1") + request = webob.Request.blank("/v1.1/flavors/1?chewing=bluegoo") request.environ['api.version'] = '1.1' response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) response_data = json.loads(response.body) - self.assertEqual(test_resp, response_data['flavor']['googoose']) + self.assertEqual('bluegoo', response_data['flavor']['googoose']) def test_get_resources_with_mgr(self): - test_resp = "Gooey goo for chewy chewing!" 
- app = fakes.wsgi_app() ext_midware = extensions.ExtensionMiddleware(app) - request = webob.Request.blank("/v1.1/flavors/1") + request = webob.Request.blank("/v1.1/flavors/1?chewing=newblue") request.environ['api.version'] = '1.1' response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) response_data = json.loads(response.body) - self.assertEqual(test_resp, response_data['flavor']['googoose']) + self.assertEqual('newblue', response_data['flavor']['googoose']) self.assertEqual("Pig Bands!", response_data['big_bands']) - - -class RequestExtensionTest(unittest.TestCase): - - def setUp(self): - super(RequestExtensionTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) - self.context = context.get_admin_context() - - def tearDown(self): - self.stubs.UnsetAll() - super(RequestExtensionTest, self).tearDown() - - def test_post_request_extension_with_stub_mgr(self): - - def _req_handler(req, res): - # only handle JSON responses - data = json.loads(res.body) - data['flavor']['googoose'] = req.GET.get('test_param') - return data - - resp_ext = extensions.RequestExtension('GET', - '/v1.1/flavors/:(id)', - _req_handler) - - manager = StubExtensionManager(None, None, None, resp_ext) - app = fakes.wsgi_app() - ext_midware = extensions.ExtensionMiddleware(app, manager) - request = webob.Request.blank("/v1.1/flavors/1?test_param=foo") - request.environ['api.version'] = '1.1' - response = request.get_response(ext_midware) - self.assertEqual(200, response.status_int) - response_data = json.loads(response.body) - self.assertEqual('foo', response_data['flavor']['googoose']) -- cgit From e03921c2799acf36083eb13c3134b861bc4732a6 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 12 May 2011 14:37:15 -0400 Subject: Make it so that ExtensionRequest objects now return proper webob objects. This avoids the odd serialization code in the RequestExtensionController class which converts JSON dicts to webobs for us. --- nova/api/openstack/extensions.py | 11 ----------- nova/tests/api/openstack/extensions/foxinsocks.py | 6 ++++-- nova/tests/api/openstack/test_extensions.py | 3 ++- 3 files changed, 6 insertions(+), 14 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 3a7763463..ac79b9310 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -147,20 +147,9 @@ class RequestExtensionController(common.OpenstackController): def process(self, req, *args, **kwargs): res = req.get_response(self.application) - content_type = req.best_match_content_type() # currently response handlers are un-ordered for handler in self.handlers: res = handler(req, res) - try: - body = res.body - headers = res.headers - except AttributeError: - default_xmlns = None - body = self._serialize(res, content_type, default_xmlns) - headers = {"Content-Type": content_type} - res = webob.Response() - res.body = body - res.headers = headers return res diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index b3f30c4e5..f8e31589a 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -71,7 +71,8 @@ class Foxinsocks(object): # You can use content type header to test for XML. 
data = json.loads(res.body) data['flavor']['googoose'] = req.GET.get('chewing') - return data + res.body = json.dumps(data) + return res resp_ext = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _goose_handler) @@ -82,7 +83,8 @@ class Foxinsocks(object): # You can use content type header to test for XML. data = json.loads(res.body) data['big_bands'] = 'Pig Bands!' - return data + res.body = json.dumps(data) + return res resp_ext2 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _bands_handler) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index c63474dea..544298602 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -203,7 +203,8 @@ class RequestExtensionTest(unittest.TestCase): # only handle JSON responses data = json.loads(res.body) data['flavor']['googoose'] = req.GET.get('chewing') - return data + res.body = json.dumps(data) + return res req_ext = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', -- cgit From 27b5de353aee88d37c369bb5b019a746116732c0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 12 May 2011 14:45:39 -0400 Subject: Variable renaming. --- nova/tests/api/openstack/extensions/foxinsocks.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index f8e31589a..dbdd0928a 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -64,7 +64,7 @@ class Foxinsocks(object): return actions def get_request_extensions(self): - response_exts = [] + request_exts = [] def _goose_handler(req, res): #NOTE: This only handles JSON responses. @@ -74,9 +74,9 @@ class Foxinsocks(object): res.body = json.dumps(data) return res - resp_ext = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', + req_ext1 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _goose_handler) - response_exts.append(resp_ext) + request_exts.append(req_ext1) def _bands_handler(req, res): #NOTE: This only handles JSON responses. @@ -86,10 +86,10 @@ class Foxinsocks(object): res.body = json.dumps(data) return res - resp_ext2 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', + req_ext2 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _bands_handler) - response_exts.append(resp_ext2) - return response_exts + request_exts.append(req_ext2) + return request_exts def _add_tweedle(self, input_dict, req, id): -- cgit From e72667cb125f1d970f302bb18f051380fac0711d Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 12 May 2011 14:52:54 -0400 Subject: Update comment. --- nova/api/openstack/extensions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index ac79b9310..0e729e137 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -147,7 +147,7 @@ class RequestExtensionController(common.OpenstackController): def process(self, req, *args, **kwargs): res = req.get_response(self.application) - # currently response handlers are un-ordered + # currently request handlers are un-ordered for handler in self.handlers: res = handler(req, res) return res -- cgit From e7662bfcead8df8cc1fc655af6da15dc47777565 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 12 May 2011 13:42:04 -0700 Subject: fix for lp760921. 
Previously, if tune2fs failed, as it does on windows hosts, kpartx -d also failed to be called which leaves mapped partitions that retain holds on the nbd device. These holds cause the observed errors. --- nova/virt/disk.py | 50 ++++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 24 deletions(-) (limited to 'nova') diff --git a/nova/virt/disk.py b/nova/virt/disk.py index ddea1a1f7..f8aea1f34 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -81,34 +81,36 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): else: mapped_device = device - # We can only loopback mount raw images. If the device isn't there, - # it's normally because it's a .vmdk or a .vdi etc - if not os.path.exists(mapped_device): - raise exception.Error('Mapped device was not found (we can' - ' only inject raw disk images): %s' % - mapped_device) - - # Configure ext2fs so that it doesn't auto-check every N boots - out, err = utils.execute('sudo', 'tune2fs', - '-c', 0, '-i', 0, mapped_device) - - tmpdir = tempfile.mkdtemp() try: - # mount loopback to dir - out, err = utils.execute( - 'sudo', 'mount', mapped_device, tmpdir) - if err: - raise exception.Error(_('Failed to mount filesystem: %s') - % err) - + # We can only loopback mount raw images. If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) + + # Configure ext2fs so that it doesn't auto-check every N boots + out, err = utils.execute('sudo', 'tune2fs', + '-c', 0, '-i', 0, mapped_device) + + tmpdir = tempfile.mkdtemp() try: - inject_data_into_fs(tmpdir, key, net, utils.execute) + # mount loopback to dir + out, err = utils.execute( + 'sudo', 'mount', mapped_device, tmpdir) + if err: + raise exception.Error(_('Failed to mount filesystem: %s') + % err) + + try: + inject_data_into_fs(tmpdir, key, net, utils.execute) + finally: + # unmount device + utils.execute('sudo', 'umount', mapped_device) finally: - # unmount device - utils.execute('sudo', 'umount', mapped_device) + # remove temporary directory + utils.execute('rmdir', tmpdir) finally: - # remove temporary directory - utils.execute('rmdir', tmpdir) if not partition is None: # remove partitions utils.execute('sudo', 'kpartx', '-d', device) -- cgit From dbff37b9ae0893ce209ff0b8c8893987226bd081 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Thu, 12 May 2011 17:24:38 -0400 Subject: handle instance_type_ids that are NULL during upgrade to integers --- .../migrate_repo/versions/016_make_instance_type_id_an_integer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py index 5d95c1024..e2d03ebf2 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py @@ -12,7 +12,7 @@ def upgrade(migrate_engine): for instance in migrate_engine.execute(instances.select()): try: types[instance.id] = int(instance.instance_type_id) - except ValueError: + except (ValueError, TypeError): types[instance.id] = None integer_column = Column('instance_type_id_int', Integer(), nullable=True) -- cgit From bbbea57cf6ab28c3ad1081041275e0d6d2bbd308 Mon Sep 17 00:00:00 2001 From: Isaku 
Yamahata Date: Fri, 13 May 2011 23:08:57 +0900 Subject: volume/driver: factor out lvm opration Factor out lvm operation for implementing basic snapshot later. --- nova/volume/driver.py | 62 ++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 28 deletions(-) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..ec7be37bf 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -90,37 +90,47 @@ class VolumeDriver(object): raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) - def create_volume(self, volume): - """Creates a logical volume. Can optionally return a Dictionary of - changes to the volume object to be persisted.""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] + def _create_volume(self, volume_name, sizestr): self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', - volume['name'], - FLAGS.volume_group) - - def delete_volume(self, volume): - """Deletes a logical volume.""" + volume_name, FLAGS.volume_group) + + def _copy_volume(self, srcstr, deststr, size_in_g): + self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M') + + def _volume_not_present(self, volume_name): + path_name = '%s/%s' % (FLAGS.volume_group, volume_name) try: - self._try_execute('sudo', 'lvdisplay', - '%s/%s' % - (FLAGS.volume_group, - volume['name'])) + self._try_execute('sudo', 'lvdisplay', path_name) except Exception as e: - # If the volume isn't present, then don't attempt to delete + # If the volume isn't present return True + return False + def _delete_volume(self, volume, size_in_g): + """Deletes a logical volume.""" # zero out old volumes to prevent data leaking between users # TODO(ja): reclaiming space should be done lazy and low priority - self._execute('sudo', 'dd', 'if=/dev/zero', - 'of=%s' % self.local_path(volume), - 'count=%d' % (volume['size'] * 1024), - 'bs=1M') + self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, - volume['name'])) + (FLAGS.volume_group, volume['name'])) + + def _sizestr(self, size_in_g): + if int(size_in_g) == 0: + return '100M' + return '%sG' % size_in_g + + def create_volume(self, volume): + """Creates a logical volume. Can optionally return a Dictionary of + changes to the volume object to be persisted.""" + self._create_volume(volume['name'], self._sizestr(volume['size'])) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + if self._volume_not_present(volume['name']): + # If the volume isn't present, then don't attempt to delete + return True + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -608,13 +618,9 @@ class SheepdogDriver(VolumeDriver): def create_volume(self, volume): """Creates a sheepdog volume""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] self._try_execute('qemu-img', 'create', "sheepdog:%s" % volume['name'], - sizestr) + self._sizestr(volume['size'])) def delete_volume(self, volume): """Deletes a logical volume""" -- cgit From 4f7cfba4a00f04b7c30c61da2946f183241a7c7f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 13 May 2011 23:27:35 +0900 Subject: volume/driver: implement basic snapshot added basic support for snapshot to VolumeDriver base class. The implementation is not effective, but works. 
The effective implementation should be done by drived driver class. --- nova/exception.py | 4 ++++ nova/volume/driver.py | 37 +++++++++++++++++++++++++++++++++---- nova/volume/manager.py | 6 ++++++ 3 files changed, 43 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/exception.py b/nova/exception.py index 39620ccc1..bd04435ed 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -271,6 +271,10 @@ class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") +class VolumeIsBusy(Error): + message = _("deleting volume %(volume_name)s that has snapshot") + + class ExportDeviceNotFoundForVolume(NotFound): message = _("No export device found for volume %(volume_id)s.") diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ec7be37bf..a6cf2cb46 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -113,13 +113,21 @@ class VolumeDriver(object): # TODO(ja): reclaiming space should be done lazy and low priority self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, volume['name'])) + (FLAGS.volume_group, + self._escape_snapshot(volume['name']))) def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' return '%sG' % size_in_g + # Linux LVM reserves name that starts with snapshot, so that + # such volume name can't be created. Mangle it. + def _escape_snapshot(self, snapshot_name): + if not snapshot_name.startswith('snapshot'): + return snapshot_name + return '_' + snapshot_name + def create_volume(self, volume): """Creates a logical volume. Can optionally return a Dictionary of changes to the volume object to be persisted.""" @@ -130,20 +138,41 @@ class VolumeDriver(object): if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True + + # TODO(yamahata): lvm can't delete origin volume only without + # deleting derived snapshots. Can we do something fancy? + out, err = self._execute('sudo', 'lvdisplay', '--noheading', + '-C', '-o', 'Attr', + '%s/%s' % (FLAGS.volume_group, + volume['name'])) + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy(volume_name=volume['name']) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" - raise NotImplementedError() + orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) + self._try_execute('sudo', 'lvcreate', '-L', + self._sizestr(snapshot['volume_size']), + '--name', self._escape_snapshot(snapshot['name']), + '--snapshot', orig_lv_name) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - raise NotImplementedError() + if self._volume_not_present(self._escape_snapshot(snapshot['name'])): + # If the snapshot isn't present, then don't attempt to delete + return True + + # TODO(yamahata): zeroing out the whole snapshot triggers COW. + # it's quite slow. 
+ self._delete_volume(snapshot, snapshot['volume_size']) def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') - escaped_name = volume['name'].replace('-', '--') + escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def ensure_export(self, context, volume): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..fd889633d 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -142,6 +142,12 @@ class VolumeManager(manager.SchedulerDependentManager): self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) + except exception.VolumeIsBusy, e: + LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) + self.driver.ensure_export(context, volume_ref) + self.db.volume_update(context, volume_ref['id'], + {'status': 'available'}) + return True except Exception: self.db.volume_update(context, volume_ref['id'], -- cgit From aaec8400be701c674bbf89badd59ee9468827ed9 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Sat, 14 May 2011 01:42:26 +0900 Subject: volume/driver: make unit test, test_volume, pass fake command executer doesn't return command result. Which return None instead of string. So add None check to make unit test pass. --- nova/volume/driver.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index a6cf2cb46..0807ff476 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -145,9 +145,11 @@ class VolumeDriver(object): '-C', '-o', 'Attr', '%s/%s' % (FLAGS.volume_group, volume['name'])) - out = out.strip() - if (out[0] == 'o') or (out[0] == 'O'): - raise exception.VolumeIsBusy(volume_name=volume['name']) + # fake_execute returns None resulting unit test error + if out: + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume, volume['size']) -- cgit From 8b86fb3a4d9ee3e328232c0051b9daff6838d00d Mon Sep 17 00:00:00 2001 From: Josh Durgin Date: Fri, 13 May 2011 10:26:13 -0700 Subject: Add support for rbd snapshots. --- nova/volume/driver.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 0807ff476..e0e18b9bf 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -608,6 +608,18 @@ class RBDDriver(VolumeDriver): self._try_execute('rbd', '--pool', FLAGS.rbd_pool, 'rm', volume['name']) + def create_snapshot(self, snapshot): + """Creates an rbd snapshot""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'snap', 'create', '--snap', snapshot['name'], + snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes an rbd snapshot""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'snap', 'rm', '--snap', snapshot['name'], + snapshot['volume_name']) + def local_path(self, volume): """Returns the path of the rbd volume.""" # This is the same as the remote path -- cgit From 5eb57c6191ac7c8d98539eb3967ceb00f7c55daf Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Mon, 16 May 2011 16:29:21 +0900 Subject: Add a unit test for snapshot_volume. 
--- nova/tests/test_volume.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'nova') diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 236d12434..c66b66959 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -176,6 +176,33 @@ class VolumeTestCase(test.TestCase): # This will allow us to test cross-node interactions pass + @staticmethod + def _create_snapshot(volume_id, size='0'): + """Create a snapshot object.""" + snap = {} + snap['volume_size'] = size + snap['user_id'] = 'fake' + snap['project_id'] = 'fake' + snap['volume_id'] = volume_id + snap['status'] = "creating" + return db.snapshot_create(context.get_admin_context(), snap)['id'] + + def test_create_delete_snapshot(self): + """Test snapshot can be created and deleted.""" + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + snapshot_id = self._create_snapshot(volume_id) + self.volume.create_snapshot(self.context, volume_id, snapshot_id) + self.assertEqual(snapshot_id, db.snapshot_get(context.get_admin_context(), + snapshot_id).id) + + self.volume.delete_snapshot(self.context, snapshot_id) + self.assertRaises(exception.NotFound, + db.snapshot_get, + self.context, + snapshot_id) + self.volume.delete_volume(self.context, volume_id) + class DriverTestCase(test.TestCase): """Base Test class for Drivers.""" -- cgit From d44299be90bbfcac5f8de1e1264b81fbb0bfa5e2 Mon Sep 17 00:00:00 2001 From: Masanori Itoh Date: Tue, 17 May 2011 01:00:16 +0900 Subject: Add vnc_keymap flag and enable setting keymap for vnc console. --- nova/virt/libvirt.xml.template | 2 +- nova/virt/libvirt_conn.py | 1 + nova/vnc/__init__.py | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index de2497a76..20986d4d5 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -116,7 +116,7 @@ #if $getVar('vncserver_host', False) - + #end if diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 555e44ce2..7552c9488 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1022,6 +1022,7 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.vnc_enabled: if FLAGS.libvirt_type != 'lxc': xml_info['vncserver_host'] = FLAGS.vncserver_host + xml_info['vnc_keymap'] = FLAGS.vnc_keymap if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py index b5b00e44e..859bfd65f 100644 --- a/nova/vnc/__init__.py +++ b/nova/vnc/__init__.py @@ -32,3 +32,5 @@ flags.DEFINE_string('vncserver_host', '0.0.0.0', 'the host interface on which vnc server should listen') flags.DEFINE_bool('vnc_enabled', True, 'enable vnc related features') +flags.DEFINE_string('vnc_keymap', 'en-us', + 'keymap for vnc') -- cgit From 8cf2087747ab87fec0e1f7cc3d57ed1fa5065749 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Mon, 16 May 2011 14:50:07 -0400 Subject: add a todo --- nova/api/openstack/limits.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 47bc238f1..f30c9ec59 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -64,7 +64,9 @@ class LimitsController(common.OpenstackController): """ Return all global and rate limit information. 
""" - abs_limits = {} + # TODO(alex.meade) make this work + project_quota = quota.get_project_quota(...) + abs_limits = project_quota.limits rate_limits = req.environ.get("nova.limits", []) builder = self._get_view_builder(req) -- cgit From ea847e600249f1e3b65e04cfaa67014508c26e95 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 16 May 2011 15:16:34 -0500 Subject: Merge prop changes --- nova/flags.py | 2 +- nova/notifier/api.py | 20 ++++++++++++-------- nova/notifier/log_notifier.py | 19 +++++++++---------- nova/notifier/no_op_notifier.py | 9 +++------ nova/notifier/rabbit_notifier.py | 19 ++++++++----------- nova/tests/test_notifier.py | 22 +++++++++++----------- 6 files changed, 44 insertions(+), 47 deletions(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index a1f7f71c8..32cb6efa8 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -370,7 +370,7 @@ DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') DEFINE_string('notification_driver', - 'nova.notifier.no_op_notifier.NoopNotifier', + 'nova.notifier.no_op_notifier', 'Default driver for sending notifications') DEFINE_list('memcached_servers', None, 'Memcached servers or None for in process cache.') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 4fcfa84ff..5b9b8ea29 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -37,21 +37,25 @@ class BadPriorityException(Exception): pass -def notify(event_name, publisher_id, event_type, priority, message): +def notify(publisher_id, event_type, priority, message): """ Sends a notification using the specified driver - Message format is as follows: + Notify parameters: - message_id - a UUID representing the id for this notification publisher_id - the source worker_type.host of the message - timestamp - the GMT timestamp the notification was sent at event_type - the literal type of event (ex. Instance Creation) priority - patterned after the enumeration of Python logging levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) message - A python dictionary of attributes - The message payload will be constructed as a dictionary of the above + Outgoing message format includes the above parameters, and appends the + following: + + message_id - a UUID representing the id for this notification + timestamp - the GMT timestamp the notification was sent at + + The composite message will be constructed as a dictionary of the above attributes, which will then be sent via the transport mechanism defined by the driver. @@ -62,17 +66,17 @@ def notify(event_name, publisher_id, event_type, priority, message): 'timestamp': datetime.datetime.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', - 'message': {'instance_id': 12, ... }} + 'payload': {'instance_id': 12, ... 
}} """ if priority not in log_levels: raise BadPriorityException( _('%s not in valid priorities' % priority)) - driver = utils.import_class(FLAGS.notification_driver)() + driver = utils.import_object(FLAGS.notification_driver) msg = dict(message_id=str(uuid.uuid4()), publisher_id=publisher_id, event_type=event_type, priority=priority, - message=message, + payload=message, timestamp=str(datetime.datetime.utcnow())) driver.notify(msg) diff --git a/nova/notifier/log_notifier.py b/nova/notifier/log_notifier.py index f072a6125..a3df31721 100644 --- a/nova/notifier/log_notifier.py +++ b/nova/notifier/log_notifier.py @@ -21,14 +21,13 @@ from nova import log as logging FLAGS = flags.FLAGS -class LogNotifier(object): - """Log notifications using nova's default logging system""" +def notify(message): + """Notifies the recipient of the desired event given the model. + Log notifications using nova's default logging system""" - def notify(self, message): - """Notifies the recipient of the desired event given the model""" - priority = message.get('priority', - FLAGS.default_notification_level) - priority = priority.lower() - logger = logging.getLogger( - 'nova.notification.%s' % message['event_type']) - getattr(logger, priority)(json.dumps(message)) + priority = message.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'nova.notification.%s' % message['event_type']) + getattr(logger, priority)(json.dumps(message)) diff --git a/nova/notifier/no_op_notifier.py b/nova/notifier/no_op_notifier.py index f5e745f1f..029710505 100644 --- a/nova/notifier/no_op_notifier.py +++ b/nova/notifier/no_op_notifier.py @@ -14,9 +14,6 @@ # under the License. -class NoopNotifier(object): - """A notifier that doesn't actually do anything. 
Simply a placeholder""" - - def notify(self, message): - """Notifies the recipient of the desired event given the model""" - pass +def notify(message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py index 7e2ee5f0b..acab79658 100644 --- a/nova/notifier/rabbit_notifier.py +++ b/nova/notifier/rabbit_notifier.py @@ -25,14 +25,11 @@ flags.DEFINE_string('notification_topic', 'notifications', 'RabbitMQ topic used for Nova notifications') -class RabbitNotifier(object): - """Sends notifications to a specific RabbitMQ server and topic""" - - def notify(self, message): - """Sends a notification to the RabbitMQ""" - context = nova.context.get_admin_context() - priority = message.get('priority', - FLAGS.default_notification_level) - priority = priority.lower() - topic = '%s.%s' % (FLAGS.notification_topic, priority) - rpc.cast(context, topic, message) +def notify(message): + """Sends a notification to the RabbitMQ""" + context = nova.context.get_admin_context() + priority = message.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + topic = '%s.%s' % (FLAGS.notification_topic, priority) + rpc.cast(context, topic, message) diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 82c4d3f5a..b6b0fcc68 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -43,12 +43,12 @@ class NotifierTestCase(test.TestCase): def mock_notify(cls, *args): self.notify_called = True - self.stubs.Set(nova.notifier.no_op_notifier.NoopNotifier, 'notify', + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify) class Mock(object): pass - notify('event_name', 'publisher_id', 'event_type', + notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_called, True) @@ -56,24 +56,24 @@ class NotifierTestCase(test.TestCase): """A test to ensure changing the message format is prohibitively annoying""" - def message_assert(cls, message): + def message_assert(message): fields = [('publisher_id', 'publisher_id'), ('event_type', 'event_type'), ('priority', 'WARN'), - ('message', dict(a=3))] + ('payload', dict(a=3))] for k, v in fields: self.assertEqual(message[k], v) self.assertTrue(len(message['message_id']) > 0) self.assertTrue(len(message['timestamp']) > 0) - self.stubs.Set(nova.notifier.no_op_notifier.NoopNotifier, 'notify', + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', message_assert) - notify('event_name', 'publisher_id', 'event_type', + notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) def test_send_rabbit_notification(self): self.stubs.Set(nova.flags.FLAGS, 'notification_driver', - 'nova.notifier.rabbit_notifier.RabbitNotifier') + 'nova.notifier.rabbit_notifier') self.mock_cast = False def mock_cast(cls, *args): @@ -83,7 +83,7 @@ class NotifierTestCase(test.TestCase): pass self.stubs.Set(nova.rpc, 'cast', mock_cast) - notify('event_name', 'publisher_id', 'event_type', + notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.mock_cast, True) @@ -97,12 +97,12 @@ class NotifierTestCase(test.TestCase): self.stubs.Set(nova.rpc, 'cast', mock_cast) self.assertRaises(nova.notifier.api.BadPriorityException, - notify, 'event_name', 'publisher_id', + notify, 'publisher_id', 'event_type', 'not a priority', dict(a=3)) def test_rabbit_priority_queue(self): self.stubs.Set(nova.flags.FLAGS, 'notification_driver', - 
'nova.notifier.rabbit_notifier.RabbitNotifier') + 'nova.notifier.rabbit_notifier') self.stubs.Set(nova.flags.FLAGS, 'notification_topic', 'testnotify') @@ -112,6 +112,6 @@ class NotifierTestCase(test.TestCase): self.test_topic = topic self.stubs.Set(nova.rpc, 'cast', mock_cast) - notify('event_name', 'publisher_id', + notify('publisher_id', 'event_type', 'DEBUG', dict(a=3)) self.assertEqual(self.test_topic, 'testnotify.debug') -- cgit From 9fb47870df17e66a2294af7f52eb2dc5845405c0 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 16 May 2011 15:45:40 -0500 Subject: Conceded :-D --- nova/notifier/api.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 5b9b8ea29..a2231055a 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -37,7 +37,7 @@ class BadPriorityException(Exception): pass -def notify(publisher_id, event_type, priority, message): +def notify(publisher_id, event_type, priority, payload): """ Sends a notification using the specified driver @@ -47,7 +47,7 @@ def notify(publisher_id, event_type, priority, message): event_type - the literal type of event (ex. Instance Creation) priority - patterned after the enumeration of Python logging levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) - message - A python dictionary of attributes + payload - A python dictionary of attributes Outgoing message format includes the above parameters, and appends the following: @@ -77,6 +77,6 @@ def notify(publisher_id, event_type, priority, message): publisher_id=publisher_id, event_type=event_type, priority=priority, - payload=message, + payload=payload, timestamp=str(datetime.datetime.utcnow())) driver.notify(msg) -- cgit From b00d39083901731a4345c0a0b13ce98f1dfaaf01 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Mon, 16 May 2011 17:58:07 -0400 Subject: add logging to migration and fix migration version --- .../016_make_instance_type_id_an_integer.py | 61 -------------------- .../017_make_instance_type_id_an_integer.py | 67 ++++++++++++++++++++++ 2 files changed, 67 insertions(+), 61 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py deleted file mode 100644 index e2d03ebf2..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_instance_type_id_an_integer.py +++ /dev/null @@ -1,61 +0,0 @@ -from sqlalchemy import Column, Integer, MetaData, String, Table - -meta = MetaData() - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - types = {} - for instance in migrate_engine.execute(instances.select()): - try: - types[instance.id] = int(instance.instance_type_id) - except (ValueError, TypeError): - types[instance.id] = None - - integer_column = Column('instance_type_id_int', Integer(), nullable=True) - string_column = instances.c.instance_type_id - - integer_column.create(instances) - for instance_id, instance_type_id in types.iteritems(): - update = instances.update().\ - where(instances.c.id == instance_id).\ - values(instance_type_id_int=instance_type_id) - migrate_engine.execute(update) - - 
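A minimal sketch of a caller of the reworked notify() entry point above; the publisher_id, event_type and payload values are invented, and the message goes to whichever module --notification_driver names (the no-op notifier by default).

# Hypothetical emitter using the new module-level notifier API.
from nova.notifier import api as notifier_api

notifier_api.notify(publisher_id='compute.host1',
                    event_type='compute.create_instance',
                    priority=notifier_api.WARN,
                    payload=dict(instance_id=12))

The driver only sees the composed message dict (message_id, timestamp, priority, event_type, publisher_id, payload), so switching between log_notifier, no_op_notifier and rabbit_notifier via the flag needs no changes on the calling side.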
string_column.alter(name='instance_type_id_str') - integer_column.alter(name='instance_type_id') - string_column.drop() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - integer_column = instances.c.instance_type_id - string_column = Column('instance_type_id_str', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - types = {} - for instance in migrate_engine.execute(instances.select()): - if instance.instance_type_id is None: - types[instance.id] = None - else: - types[instance.id] = str(instance.instance_type_id) - - string_column.create(instances) - for instance_id, instance_type_id in types.iteritems(): - update = instances.update().\ - where(instances.c.id == instance_id).\ - values(instance_type_id_str=instance_type_id) - migrate_engine.execute(update) - - integer_column.alter(name='instance_type_id_int') - string_column.alter(name='instance_type_id') - integer_column.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py new file mode 100644 index 000000000..8da9108ce --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py @@ -0,0 +1,67 @@ +from sqlalchemy import Column, Integer, MetaData, String, Table +from nova import log as logging + +meta = MetaData() + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + try: + types[instance.id] = int(instance.instance_type_id) + except ValueError: + logging.warn("Instance %s did not have instance_type_id " + "converted to an integer because its value is %s" % + (instance.id, instance.instance_type_id)) + types[instance.id] = None + + integer_column = Column('instance_type_id_int', Integer(), nullable=True) + string_column = instances.c.instance_type_id + + integer_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_int=instance_type_id) + migrate_engine.execute(update) + + string_column.alter(name='instance_type_id_str') + integer_column.alter(name='instance_type_id') + string_column.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + integer_column = instances.c.instance_type_id + string_column = Column('instance_type_id_str', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + else: + types[instance.id] = str(instance.instance_type_id) + + string_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_str=instance_type_id) + migrate_engine.execute(update) + + integer_column.alter(name='instance_type_id_int') + string_column.alter(name='instance_type_id') + 
integer_column.drop() -- cgit From d9eb72b3e76bd019b817936caa40c897416394b0 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 16 May 2011 17:36:42 -0500 Subject: Added missing metadata join to instance_get calls. --- nova/db/sqlalchemy/api.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 3681f30db..b53e81053 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -873,6 +873,7 @@ def instance_get_all(context): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -885,6 +886,7 @@ def instance_get_all_by_user(context, user_id): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(user_id=user_id).\ -- cgit From 6404e2bb02f0736c43ef37292c1143e58903c5db Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 16 May 2011 23:14:51 -0400 Subject: Update comment on RequestExtension class. --- nova/api/openstack/extensions.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 0e729e137..8e77b25fb 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -410,8 +410,11 @@ class ExtensionManager(object): class RequestExtension(object): - """Provide a way to handle custom request data that is sent to core - nova OpenStack API controllers. + """Extend requests and responses of core nova OpenStack API controllers. + + Provide a way to add data to responses and handle custom request data + that is sent to core nova OpenStack API controllers. 
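The metadata join added above eagerly loads each instance's metadata rows in the same query, presumably so callers can read the metadata collection without per-instance lazy loads. A small sketch of the pattern, assuming a nova checkout (get_session and models come from nova.db.sqlalchemy):

# Fetch instances together with their metadata in one round trip.
from sqlalchemy.orm import joinedload

from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session

session = get_session()
instances = session.query(models.Instance).\
                    options(joinedload('metadata')).\
                    filter_by(deleted=False).\
                    all()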
+ """ def __init__(self, method, url_route, handler): self.url_route = url_route -- cgit From f51bd03c9ce5f4248cb6f10e3ed662ae6ba33ebd Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 17 May 2011 15:49:31 +0000 Subject: Instead of using a temp file with openssl, just write directly to stdin --- nova/virt/xenapi/vmops.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 13d7d215b..0074444f8 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -25,7 +25,6 @@ import M2Crypto import os import pickle import subprocess -import tempfile import uuid from nova import context @@ -1163,18 +1162,17 @@ class SimpleDH(object): return mpi def _run_ssl(self, text, which): - base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc ' - '-a -pass pass:%(shared)s -nosalt %(dec_flag)s') + base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s ' + '-nosalt %(dec_flag)s') if which.lower()[0] == 'd': dec_flag = ' -d' else: dec_flag = '' - fd, tmpfile = tempfile.mkstemp() - os.close(fd) - file(tmpfile, 'w').write(text) shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) + proc.stdin.write(text) + proc.stdin.close() proc.wait() err = proc.stderr.read() if err: -- cgit From 4dfe3a6b9ae44654b50ea8d65ac18a7a10f5abe3 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Tue, 17 May 2011 12:33:58 -0400 Subject: don't throw type errors on NoneType int conversions --- .../migrate_repo/versions/017_make_instance_type_id_an_integer.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py index 8da9108ce..cda890c94 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py @@ -13,6 +13,7 @@ def upgrade(migrate_engine): for instance in migrate_engine.execute(instances.select()): if instance.instance_type_id is None: types[instance.id] = None + continue try: types[instance.id] = int(instance.instance_type_id) except ValueError: -- cgit From b312ac2634f530273e599ee48ff2e3a238bbbf4f Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 17 May 2011 16:46:13 +0000 Subject: Set password to one requested in API call --- nova/api/openstack/servers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8f2de2afe..fcb630fae 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -609,7 +609,8 @@ class ControllerV10(Controller): def _parse_update(self, context, server_id, inst_dict, update_dict): if 'adminPass' in inst_dict['server']: update_dict['admin_pass'] = inst_dict['server']['adminPass'] - self.compute_api.set_admin_password(context, server_id) + self.compute_api.set_admin_password(context, server_id, + inst_dict['server']['adminPass']) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] -- cgit From 34b1461fa567f5ffba89b893b8082df050a64a5e Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Tue, 17 May 2011 13:46:15 -0400 Subject: add ram limits to instance quotas --- nova/db/api.py | 2 +- nova/db/sqlalchemy/api.py | 5 +++-- nova/quota.py | 26 +++++++++++++++++--------- 3 files changed, 21 
insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index ef8aa1143..f341ffeb5 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -403,7 +403,7 @@ def instance_create(context, values): def instance_data_get_for_project(context, project_id): - """Get (instance_count, core_count) for project.""" + """Get (instance_count, core_count, ram_count) for project.""" return IMPL.instance_data_get_for_project(context, project_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ea0bbb06e..6d34a87ca 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -802,12 +802,13 @@ def instance_create(context, values): def instance_data_get_for_project(context, project_id): session = get_session() result = session.query(func.count(models.Instance.id), - func.sum(models.Instance.vcpus)).\ + func.sum(models.Instance.vcpus), + func.sum(models.Instance.memory_mb)).\ filter_by(project_id=project_id).\ filter_by(deleted=False).\ first() # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) + return (result[0] or 0, result[1] or 0, result[2] or 0) @require_context diff --git a/nova/quota.py b/nova/quota.py index a93cd0766..b37fa5ba4 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -28,6 +28,8 @@ flags.DEFINE_integer('quota_instances', 10, 'number of instances allowed per project') flags.DEFINE_integer('quota_cores', 20, 'number of instance cores allowed per project') +flags.DEFINE_integer('quota_ram', 50 * 1024, + 'megabytes of instance ram allowed per project') flags.DEFINE_integer('quota_volumes', 10, 'number of volumes allowed per project') flags.DEFINE_integer('quota_gigabytes', 1000, @@ -45,12 +47,15 @@ flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255, def get_quota(context, project_id): - rval = {'instances': FLAGS.quota_instances, - 'cores': FLAGS.quota_cores, - 'volumes': FLAGS.quota_volumes, - 'gigabytes': FLAGS.quota_gigabytes, - 'floating_ips': FLAGS.quota_floating_ips, - 'metadata_items': FLAGS.quota_metadata_items} + rval = { + 'instances': FLAGS.quota_instances, + 'cores': FLAGS.quota_cores, + 'ram': FLAGS.quota_ram, + 'volumes': FLAGS.quota_volumes, + 'gigabytes': FLAGS.quota_gigabytes, + 'floating_ips': FLAGS.quota_floating_ips, + 'metadata_items': FLAGS.quota_metadata_items, + } quota = db.quota_get_all_by_project(context, project_id) for key in rval.keys(): @@ -70,15 +75,18 @@ def allowed_instances(context, num_instances, instance_type): project_id = context.project_id context = context.elevated() num_cores = num_instances * instance_type['vcpus'] - used_instances, used_cores = db.instance_data_get_for_project(context, - project_id) + num_ram = num_instances * instance_type['memory_mb'] + usage = db.instance_data_get_for_project(context, project_id) + used_instances, used_cores, used_ram = usage quota = get_quota(context, project_id) allowed_instances = _get_request_allotment(num_instances, used_instances, quota['instances']) allowed_cores = _get_request_allotment(num_cores, used_cores, quota['cores']) + allowed_ram = _get_request_allotment(num_ram, used_ram, quota['ram']) allowed_instances = min(allowed_instances, - int(allowed_cores // instance_type['vcpus'])) + allowed_cores // instance_type['vcpus'], + allowed_ram // instance_type['memory_mb']) return min(num_instances, allowed_instances) -- cgit From 6c27e24a559722a5a82d8883f508a77d281956f5 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 17 May 2011 17:51:22 +0000 Subject: Avoid using spawn_n to fix LP784132 --- 
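A worked example of how the new ram ceiling in allowed_instances() above combines with the existing instance and core ceilings; the numbers are invented and nothing is assumed to be in use yet.

# With nothing consumed, each resource allows:
#   instances: 10, cores: 20 // 4 vcpus = 5, ram: 51200 // 16384 MB = 3,
# so RAM becomes the limiting factor for this flavor.
quota = {'instances': 10, 'cores': 20, 'ram': 50 * 1024}
instance_type = {'vcpus': 4, 'memory_mb': 16 * 1024}
requested = 10

allowed = min(requested,
              quota['instances'],
              quota['cores'] // instance_type['vcpus'],
              quota['ram'] // instance_type['memory_mb'])
assert allowed == 3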
nova/api/openstack/servers.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index fcb630fae..bd9711555 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -609,7 +609,9 @@ class ControllerV10(Controller): def _parse_update(self, context, server_id, inst_dict, update_dict): if 'adminPass' in inst_dict['server']: update_dict['admin_pass'] = inst_dict['server']['adminPass'] - self.compute_api.set_admin_password(context, server_id, + # We call _set_admin_password() here to avoid the spawn_n in + # set_admin_password() + self.compute_api._set_admin_password(context, server_id, inst_dict['server']['adminPass']) def _action_rebuild(self, info, request, instance_id): -- cgit From 11a36377f81f6f4c6c20e5802aa91e472772fbc9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 May 2011 11:31:09 -0700 Subject: make token use typo that is in database. Also fix now -> utcnow and stop using . syntax for dealing with tokens --- nova/api/openstack/auth.py | 21 ++++++++++----------- nova/tests/api/openstack/test_auth.py | 2 +- 2 files changed, 11 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 311e6bde9..7ccd46f54 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -17,7 +17,6 @@ import datetime import hashlib -import json import time import webob.exc @@ -25,11 +24,9 @@ import webob.dec from nova import auth from nova import context -from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import manager from nova import utils from nova import wsgi from nova.api.openstack import faults @@ -102,11 +99,13 @@ class AuthMiddleware(wsgi.Middleware): token, user = self._authorize_user(username, key, req) if user and token: res = webob.Response() - res.headers['X-Auth-Token'] = token.token_hash + res.headers['X-Auth-Token'] = token['token_hash'] + # NOTE(vish): the apparrent typo in manageent is actually how it + # is in the db res.headers['X-Server-Management-Url'] = \ - token.server_management_url - res.headers['X-Storage-Url'] = token.storage_url - res.headers['X-CDN-Management-Url'] = token.cdn_management_url + token['server_manageent_url'] + res.headers['X-Storage-Url'] = token['storage_url'] + res.headers['X-CDN-Management-Url'] = token['cdn_management_url'] res.content_type = 'text/plain' res.status = '204' LOG.debug(_("Successfully authenticated '%s'") % username) @@ -130,11 +129,11 @@ class AuthMiddleware(wsgi.Middleware): except exception.NotFound: return None if token: - delta = datetime.datetime.now() - token.created_at + delta = datetime.datetime.utcnow() - token['created_at'] if delta.days >= 2: - self.db.auth_token_destroy(ctxt, token.token_hash) + self.db.auth_token_destroy(ctxt, token['token_hash']) else: - return self.auth.get_user(token.user_id) + return self.auth.get_user(token['user_id']) return None def _authorize_user(self, username, key, req): @@ -159,7 +158,7 @@ class AuthMiddleware(wsgi.Middleware): token_dict['token_hash'] = token_hash token_dict['cdn_management_url'] = '' os_url = req.url - token_dict['server_management_url'] = os_url + token_dict['server_manageent_url'] = os_url token_dict['storage_url'] = '' token_dict['user_id'] = user.id token = self.db.auth_token_create(ctxt, token_dict) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 
8f189c744..a35bdfef3 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -150,7 +150,7 @@ class TestFunctional(test.TestCase): tok = db.auth_token_create(ctx, dict( token_hash='test_token_hash', cdn_management_url='', - server_management_url='', + server_manageent_url='', storage_url='', user_id='user1', )) -- cgit From 862097d822b49d79c0a3f2c317ae9cec90d5120e Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 17 May 2011 18:58:38 +0000 Subject: Update test case to ensure password gets set correctly --- nova/tests/api/openstack/test_servers.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index e8182b6a9..ca5b06c72 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -138,6 +138,16 @@ def find_host(self, context, instance_id): return "nova" +class MockSetAdminPassword(object): + def __init__(self): + self.instance_id = None + self.password = None + + def __call__(self, context, instance_id, password): + self.instance_id = instance_id + self.password = password + + class ServersTest(test.TestCase): def setUp(self): @@ -773,6 +783,8 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_update', server_update) self.stubs.Set(nova.compute.api.API, "_find_host", find_host) + mock_method = MockSetAdminPassword() + self.stubs.Set(nova.compute.api.API, '_set_admin_password', mock_method) req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' @@ -780,6 +792,8 @@ class ServersTest(test.TestCase): req.body = self.body res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 204) + self.assertEqual(mock_method.instance_id, '1') + self.assertEqual(mock_method.password, 'bacon') def test_update_server_adminPass_ignored_v1_1(self): inst_dict = dict(name='server_test', adminPass='bacon') @@ -996,16 +1010,6 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 501) def test_server_change_password_v1_1(self): - - class MockSetAdminPassword(object): - def __init__(self): - self.instance_id = None - self.password = None - - def __call__(self, context, instance_id, password): - self.instance_id = instance_id - self.password = password - mock_method = MockSetAdminPassword() self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) body = {'changePassword': {'adminPass': '1234pass'}} -- cgit From 91e96cea27c91190f6205defa1f5a3641a0e0f56 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 May 2011 12:12:48 -0700 Subject: add migration for proper name --- nova/api/openstack/auth.py | 6 +-- .../versions/017_rename_server_management_url.py | 60 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 +- nova/tests/api/openstack/test_auth.py | 2 +- 4 files changed, 64 insertions(+), 6 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/017_rename_server_management_url.py (limited to 'nova') diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 7ccd46f54..6c6ee22a2 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -100,10 +100,8 @@ class AuthMiddleware(wsgi.Middleware): if user and token: res = webob.Response() res.headers['X-Auth-Token'] = token['token_hash'] - # NOTE(vish): the apparrent typo in manageent is actually how it - # is in the db res.headers['X-Server-Management-Url'] = \ - 
token['server_manageent_url'] + token['server_management_url'] res.headers['X-Storage-Url'] = token['storage_url'] res.headers['X-CDN-Management-Url'] = token['cdn_management_url'] res.content_type = 'text/plain' @@ -158,7 +156,7 @@ class AuthMiddleware(wsgi.Middleware): token_dict['token_hash'] = token_hash token_dict['cdn_management_url'] = '' os_url = req.url - token_dict['server_manageent_url'] = os_url + token_dict['server_management_url'] = os_url token_dict['storage_url'] = '' token_dict['user_id'] = user.id token = self.db.auth_token_create(ctxt, token_dict) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/017_rename_server_management_url.py new file mode 100644 index 000000000..a169afb40 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_rename_server_management_url.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table +#from nova import log as logging + +meta = MetaData() + +c_manageent = Column('server_manageent_url', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + +c_management = Column('server_management_url', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + tokens = Table('auth_tokens', meta, autoload=True, + autoload_with=migrate_engine) + + tokens.create_column(c_management) + migrate_engine.execute(tokens.update() + .values(server_management_url=tokens.c.server_manageent_url)) + + tokens.c.server_manageent_url.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + tokens = Table('auth_tokens', meta, autoload=True, + autoload_with=migrate_engine) + + tokens.create_column(c_manageent) + migrate_engine.execute(tokens.update() + .values(server_manageent_url=tokens.c.server_management_url)) + + tokens.c.server_management_url.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 0b46d5a05..d9243a52e 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -495,7 +495,7 @@ class AuthToken(BASE, NovaBase): __tablename__ = 'auth_tokens' token_hash = Column(String(255), primary_key=True) user_id = Column(String(255)) - server_manageent_url = Column(String(255)) + server_management_url = Column(String(255)) storage_url = Column(String(255)) cdn_management_url = Column(String(255)) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index a35bdfef3..8f189c744 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -150,7 +150,7 @@ class TestFunctional(test.TestCase): tok = db.auth_token_create(ctx, dict( token_hash='test_token_hash', cdn_management_url='', - server_manageent_url='', + server_management_url='', storage_url='', user_id='user1', )) -- cgit From 7ab16489276daa2ec6f51fea6ec24cc0c46a8e14 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Tue, 17 May 2011 15:14:52 -0400 Subject: Changed builder to match specs and added test --- nova/api/openstack/limits.py | 9 +++++++-- nova/api/openstack/views/limits.py | 24 +++++++++--------------- nova/tests/api/openstack/test_limits.py | 10 ++++++++-- 3 files changed, 24 insertions(+), 19 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index cf96b1bce..e383b5efc 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -42,6 +42,9 @@ PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 +#TODO remove when mark catches up +TEST_ABSOLUTE_LIMITS = {} + class LimitsController(common.OpenstackController): """ @@ -53,7 +56,8 @@ class LimitsController(common.OpenstackController): "attributes": { "limit": ["verb", "URI", "uri", "regex", "value", "unit", "resetTime", "next-available", "remaining", "name"], - "absolute_limit": ["limit", "value"], + "absolute_limit": ["maxTotalRAMSize", "maxTotalInstances", + "maxTotalCores"], }, "plurals": { "rate": "limit", @@ -69,7 +73,8 @@ class LimitsController(common.OpenstackController): # TODO(alex.meade) make this work #project_quota = quota.get_project_quota(...) #abs_limits = project_quota.limits - abs_limits = {} + #TODO remove when mark catches up + abs_limits = TEST_ABSOLUTE_LIMITS rate_limits = req.environ.get("nova.limits", []) builder = self._get_view_builder(req) diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py index 7fae2d166..ef1243f3d 100644 --- a/nova/api/openstack/views/limits.py +++ b/nova/api/openstack/views/limits.py @@ -112,25 +112,19 @@ class ViewBuilderV11(ViewBuilder): For example: {"ram": 512, "gigabytes": 1024}. 
""" - limits = [] + limits = {} #loops through absolute limits and their values for absolute_limit_key, absolute_limit_value \ in absolute_limits.items(): - _abs_limit = None - # check for existing key - for limit in limits: - if limit["limit"] == absolute_limit_key: - _abs_limit = limit - break - - # ensure we have a key if we didn't find one - if not _abs_limit: - _abs_limit = { - "limit": absolute_limit_key, - "value": absolute_limit_value, - } + _abs_limit_map = { + "ram": "maxTotalRAMSize", + "instances": "maxTotalInstances", + "cores": "maxTotalCores", + } - limits.append(_abs_limit) + if not absolute_limit_value is None: + limits[_abs_limit_map[absolute_limit_key]] \ + = absolute_limit_value return limits diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 2689c7a24..c8a7dd7f2 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -199,6 +199,9 @@ class LimitsControllerV11Test(BaseLimitTestSuite): 5, 60).display(), ] request.environ["nova.limits"] = _limits + #set absolute limits here + limits.TEST_ABSOLUTE_LIMITS = {"ram": 512, "instances": 5} + return request def test_empty_index_json(self): @@ -208,7 +211,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): expected = { "limits": { "rate": [], - "absolute": [], + "absolute": {}, }, } body = json.loads(response.body) @@ -257,7 +260,10 @@ class LimitsControllerV11Test(BaseLimitTestSuite): }, ], - "absolute": [], + "absolute": { + "maxTotalRAMSize": 512, + "maxTotalInstances": 5, + }, }, } body = json.loads(response.body) -- cgit From 2f23012b79d422b32832396147d308cd062b8d39 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 May 2011 12:30:39 -0700 Subject: fix test --- nova/tests/api/openstack/fakes.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'nova') diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 8b0729c35..bf51239e6 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -228,6 +228,9 @@ class FakeToken(object): # FIXME(sirp): let's not use id here id = 0 + def __getitem__(self, key): + return getattr(self, key) + def __init__(self, **kwargs): FakeToken.id += 1 self.id = FakeToken.id -- cgit From 1bc00ba6e7d13ab3533297ecda6c10965776dd8a Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Tue, 17 May 2011 15:36:00 -0400 Subject: support unlimited quotas in nova-manage and flags --- nova/quota.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/quota.py b/nova/quota.py index b37fa5ba4..53eb34043 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -46,8 +46,8 @@ flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255, 'number of bytes allowed per injected file path') -def get_quota(context, project_id): - rval = { +def _get_default_quota(): + defaults = { 'instances': FLAGS.quota_instances, 'cores': FLAGS.quota_cores, 'ram': FLAGS.quota_ram, @@ -56,7 +56,15 @@ def get_quota(context, project_id): 'floating_ips': FLAGS.quota_floating_ips, 'metadata_items': FLAGS.quota_metadata_items, } + # -1 in the quota flags means unlimited + for key in defaults.keys(): + if defaults[key] == -1: + defaults[key] = None + return defaults + +def get_quota(context, project_id): + rval = _get_default_quota() quota = db.quota_get_all_by_project(context, project_id) for key in rval.keys(): if key in quota: -- cgit From 0bc5511ccfb4ea97a0ba4c8533ce5d3cd3e6df19 Mon Sep 17 00:00:00 2001 From: Alex 
Meade Date: Tue, 17 May 2011 16:25:45 -0400 Subject: Removed extra serialization metadata --- nova/api/openstack/limits.py | 3 --- 1 file changed, 3 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index e383b5efc..5e3f93f79 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -56,12 +56,9 @@ class LimitsController(common.OpenstackController): "attributes": { "limit": ["verb", "URI", "uri", "regex", "value", "unit", "resetTime", "next-available", "remaining", "name"], - "absolute_limit": ["maxTotalRAMSize", "maxTotalInstances", - "maxTotalCores"], }, "plurals": { "rate": "limit", - "absolute": "absolute_limit", }, }, } -- cgit From 3506ae02196b0d44e36f915914b98abdc46db37b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 May 2011 13:49:51 -0700 Subject: move migration 017 to 018 --- .../versions/017_rename_server_management_url.py | 60 ---------------------- .../versions/018_rename_server_management_url.py | 60 ++++++++++++++++++++++ 2 files changed, 60 insertions(+), 60 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/017_rename_server_management_url.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/017_rename_server_management_url.py deleted file mode 100644 index a169afb40..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/017_rename_server_management_url.py +++ /dev/null @@ -1,60 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column, Integer, MetaData, String, Table -#from nova import log as logging - -meta = MetaData() - -c_manageent = Column('server_manageent_url', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - -c_management = Column('server_management_url', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - tokens = Table('auth_tokens', meta, autoload=True, - autoload_with=migrate_engine) - - tokens.create_column(c_management) - migrate_engine.execute(tokens.update() - .values(server_management_url=tokens.c.server_manageent_url)) - - tokens.c.server_manageent_url.drop() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - tokens = Table('auth_tokens', meta, autoload=True, - autoload_with=migrate_engine) - - tokens.create_column(c_manageent) - migrate_engine.execute(tokens.update() - .values(server_manageent_url=tokens.c.server_management_url)) - - tokens.c.server_management_url.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py new file mode 100644 index 000000000..a169afb40 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table +#from nova import log as logging + +meta = MetaData() + +c_manageent = Column('server_manageent_url', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + +c_management = Column('server_management_url', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + tokens = Table('auth_tokens', meta, autoload=True, + autoload_with=migrate_engine) + + tokens.create_column(c_management) + migrate_engine.execute(tokens.update() + .values(server_management_url=tokens.c.server_manageent_url)) + + tokens.c.server_manageent_url.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + tokens = Table('auth_tokens', meta, autoload=True, + autoload_with=migrate_engine) + + tokens.create_column(c_manageent) + migrate_engine.execute(tokens.update() + .values(server_manageent_url=tokens.c.server_management_url)) + + tokens.c.server_management_url.drop() -- cgit From bd0125647a04ab8da7eef934e4a97560c1553551 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 18 May 2011 15:31:41 +0000 Subject: Fix call to spawn_n() instead. 
It expects a callable --- nova/api/openstack/servers.py | 4 +--- nova/compute/api.py | 2 +- nova/tests/api/openstack/test_servers.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index bd9711555..fcb630fae 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -609,9 +609,7 @@ class ControllerV10(Controller): def _parse_update(self, context, server_id, inst_dict, update_dict): if 'adminPass' in inst_dict['server']: update_dict['admin_pass'] = inst_dict['server']['adminPass'] - # We call _set_admin_password() here to avoid the spawn_n in - # set_admin_password() - self.compute_api._set_admin_password(context, server_id, + self.compute_api.set_admin_password(context, server_id, inst_dict['server']['adminPass']) def _action_rebuild(self, info, request, instance_id): diff --git a/nova/compute/api.py b/nova/compute/api.py index a12f8d515..2dbea8050 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -658,7 +658,7 @@ class API(base.Base): def set_admin_password(self, context, instance_id, password=None): """Set the root/admin password for the given instance.""" - eventlet.spawn_n(self._set_admin_password(context, instance_id, + eventlet.spawn_n(self._set_admin_password, (context, instance_id, password)) def inject_file(self, context, instance_id): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index ca5b06c72..dc8815845 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -784,7 +784,7 @@ class ServersTest(test.TestCase): server_update) self.stubs.Set(nova.compute.api.API, "_find_host", find_host) mock_method = MockSetAdminPassword() - self.stubs.Set(nova.compute.api.API, '_set_admin_password', mock_method) + self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' -- cgit From 4d025ef1d2b2b97c13d710cb5080b78e246215bc Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 18 May 2011 11:27:39 -0500 Subject: Added missing xenhost plugin. 
--- nova/virt/xenapi_conn.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index eb572f295..6d828e109 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -169,15 +169,15 @@ class XenAPIConnection(driver.ComputeDriver): def __init__(self, url, user, pw): super(XenAPIConnection, self).__init__() - session = XenAPISession(url, user, pw) - self._vmops = VMOps(session) - self._volumeops = VolumeOps(session) + self._session = XenAPISession(url, user, pw) + self._vmops = VMOps(self._session) + self._volumeops = VolumeOps(self._session) self._host_state = None @property def HostState(self): if not self._host_state: - self._host_state = HostState(self.session) + self._host_state = HostState(self._session) return self._host_state def init_host(self, host): -- cgit From 156ebab6599f9500d8b98c7cc1271d2502fa0627 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Wed, 18 May 2011 13:54:51 -0400 Subject: get real absolute limits in openstack api and verify absolute limit responses --- nova/api/openstack/limits.py | 11 +++------ nova/api/openstack/views/limits.py | 23 +++++++---------- nova/db/sqlalchemy/api.py | 4 +-- nova/tests/api/openstack/test_limits.py | 44 ++++++++++++++++++++++++++++++--- 4 files changed, 55 insertions(+), 27 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 5e3f93f79..1411cec01 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -30,6 +30,7 @@ from collections import defaultdict from webob.dec import wsgify +from nova import quota from nova import wsgi from nova.api.openstack import common from nova.api.openstack import faults @@ -42,9 +43,6 @@ PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 -#TODO remove when mark catches up -TEST_ABSOLUTE_LIMITS = {} - class LimitsController(common.OpenstackController): """ @@ -67,11 +65,8 @@ class LimitsController(common.OpenstackController): """ Return all global and rate limit information. """ - # TODO(alex.meade) make this work - #project_quota = quota.get_project_quota(...) - #abs_limits = project_quota.limits - #TODO remove when mark catches up - abs_limits = TEST_ABSOLUTE_LIMITS + context = req.environ['nova.context'] + abs_limits = quota.get_quota(context, context.project_id) rate_limits = req.environ.get("nova.limits", []) builder = self._get_view_builder(req) diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py index ef1243f3d..464f91e33 100644 --- a/nova/api/openstack/views/limits.py +++ b/nova/api/openstack/views/limits.py @@ -112,19 +112,14 @@ class ViewBuilderV11(ViewBuilder): For example: {"ram": 512, "gigabytes": 1024}. 
""" + limit_names = { + "ram": "maxTotalRAMSize", + "instances": "maxTotalInstances", + "cores": "maxTotalCores", + "metadata_items": "maxServerMeta", + } limits = {} - #loops through absolute limits and their values - for absolute_limit_key, absolute_limit_value \ - in absolute_limits.items(): - - _abs_limit_map = { - "ram": "maxTotalRAMSize", - "instances": "maxTotalInstances", - "cores": "maxTotalCores", - } - - if not absolute_limit_value is None: - limits[_abs_limit_map[absolute_limit_key]] \ - = absolute_limit_value - + for name, value in absolute_limits.iteritems(): + if name in limit_names and value is not None: + limits[limit_names[name]] = value return limits diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6d34a87ca..59d2c6aec 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1496,7 +1496,7 @@ def auth_token_create(_context, token): ################### -@require_admin_context +@require_context def quota_get(context, project_id, resource, session=None): if not session: session = get_session() @@ -1510,7 +1510,7 @@ def quota_get(context, project_id, resource, session=None): return result -@require_admin_context +@require_context def quota_get_all_by_project(context, project_id): session = get_session() result = {'project_id': project_id} diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index c8a7dd7f2..4e411f8fb 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -27,6 +27,7 @@ import webob from xml.dom.minidom import parseString +import nova.context from nova.api.openstack import limits @@ -75,6 +76,8 @@ class LimitsControllerV10Test(BaseLimitTestSuite): "action": "index", "controller": "", }) + context = nova.context.RequestContext('testuser', 'testproject') + request.environ["nova.context"] = context return request def _populate_limits(self, request): @@ -179,6 +182,10 @@ class LimitsControllerV11Test(BaseLimitTestSuite): """Run before each test.""" BaseLimitTestSuite.setUp(self) self.controller = limits.LimitsControllerV11() + self.absolute_limits = {} + def stub_get_quota(context, project_id): + return self.absolute_limits + self.stubs.Set(nova.quota, "get_quota", stub_get_quota) def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -188,6 +195,8 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "action": "index", "controller": "", }) + context = nova.context.RequestContext('testuser', 'testproject') + request.environ["nova.context"] = context return request def _populate_limits(self, request): @@ -199,9 +208,6 @@ class LimitsControllerV11Test(BaseLimitTestSuite): 5, 60).display(), ] request.environ["nova.limits"] = _limits - #set absolute limits here - limits.TEST_ABSOLUTE_LIMITS = {"ram": 512, "instances": 5} - return request def test_empty_index_json(self): @@ -221,6 +227,11 @@ class LimitsControllerV11Test(BaseLimitTestSuite): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits(request) + self.absolute_limits = { + 'ram': 512, + 'instances': 5, + 'cores': 21, + } response = request.get_response(self.controller) expected = { "limits": { @@ -263,12 +274,39 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "absolute": { "maxTotalRAMSize": 512, "maxTotalInstances": 5, + "maxTotalCores": 21, }, }, } body = json.loads(response.body) self.assertEqual(expected, body) + def _test_index_absolute_limits_json(self, expected): 
+ request = self._get_index_request() + response = request.get_response(self.controller) + body = json.loads(response.body) + self.assertEqual(expected, body['limits']['absolute']) + + def test_index_ignores_extra_absolute_limits_json(self): + self.absolute_limits = {'unknown_limit': 9001} + self._test_index_absolute_limits_json({}) + + def test_index_absolute_ram_json(self): + self.absolute_limits = {'ram': 1024} + self._test_index_absolute_limits_json({'maxTotalRAMSize': 1024}) + + def test_index_absolute_cores_json(self): + self.absolute_limits = {'cores': 17} + self._test_index_absolute_limits_json({'maxTotalCores': 17}) + + def test_index_absolute_instances_json(self): + self.absolute_limits = {'instances': 19} + self._test_index_absolute_limits_json({'maxTotalInstances': 19}) + + def test_index_absolute_metadata_json(self): + self.absolute_limits = {'metadata_items': 23} + self._test_index_absolute_limits_json({'maxServerMeta': 23}) + class LimitMiddlewareTest(BaseLimitTestSuite): """ -- cgit From 62713a9485f9441a70526bb5245695338215c7af Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 18 May 2011 12:55:17 -0500 Subject: Spacing changes --- nova/notifier/api.py | 1 + nova/notifier/log_notifier.py | 1 + nova/notifier/rabbit_notifier.py | 1 + 3 files changed, 3 insertions(+) (limited to 'nova') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index a2231055a..a3e7a039e 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -19,6 +19,7 @@ import uuid from nova import flags from nova import utils + FLAGS = flags.FLAGS flags.DEFINE_string('default_notification_level', 'INFO', diff --git a/nova/notifier/log_notifier.py b/nova/notifier/log_notifier.py index a3df31721..25dfc693b 100644 --- a/nova/notifier/log_notifier.py +++ b/nova/notifier/log_notifier.py @@ -18,6 +18,7 @@ import json from nova import flags from nova import log as logging + FLAGS = flags.FLAGS diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py index acab79658..d46670b58 100644 --- a/nova/notifier/rabbit_notifier.py +++ b/nova/notifier/rabbit_notifier.py @@ -19,6 +19,7 @@ import nova.context from nova import flags from nova import rpc + FLAGS = flags.FLAGS flags.DEFINE_string('notification_topic', 'notifications', -- cgit From 01f7b0aa8de984baa27be50171526696aac48c0c Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 18 May 2011 14:46:39 -0500 Subject: Adding FlagNotSet exception --- nova/api/openstack/zones.py | 3 ++- nova/exception.py | 4 ++++ nova/tests/api/openstack/test_zones.py | 12 ++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 70653dc0e..145b24347 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -18,6 +18,7 @@ import urlparse from nova import crypto from nova import db +from nova import exception from nova import flags from nova import log as logging from nova.api.openstack import common @@ -54,7 +55,7 @@ def _scrub_zone(zone): def check_encryption_key(func): def wrapped(*args, **kwargs): if not FLAGS.build_plan_encryption_key: - raise exception.Error(_("--build_plan_encryption_key not set")) + raise exception.FlagNotSet(flag='build_plan_encryption_key') return func(*args, **kwargs) return wrapped diff --git a/nova/exception.py b/nova/exception.py index cf6069454..56c20d111 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -255,6 +255,10 @@ class NotFound(NovaException): super(NotFound, self).__init__(**kwargs) +class 
FlagNotSet(NotFound): + message = _("Required flag %(flag)s not set.") + + class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index b42b3e7d8..62a763c6f 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -21,6 +21,7 @@ import json import nova.db from nova import context from nova import crypto +from nova import exception from nova import flags from nova import test from nova.api.openstack import zones @@ -120,6 +121,17 @@ class ZonesTest(test.TestCase): FLAGS.zone_capabilities = self.old_zone_capabilities super(ZonesTest, self).tearDown() + def test_check_encryption_key(self): + @zones.check_encryption_key + def test_func(): + return 42 + + self.assertRaises(exception.FlagNotSet, test_func) + + FLAGS.build_plan_encryption_key = "something" + ret = test_func() + self.assertEqual(ret, 42) + def test_get_zone_list_scheduler(self): self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler) req = webob.Request.blank('/v1.0/zones') -- cgit From d44a4728c23cebd1eaa7615c3b439e44972750cc Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 18 May 2011 15:14:24 -0500 Subject: On second thought, removing decorator --- nova/api/openstack/zones.py | 12 +++--------- nova/tests/api/openstack/test_zones.py | 11 ----------- 2 files changed, 3 insertions(+), 20 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 145b24347..af73d8f6d 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -52,14 +52,6 @@ def _scrub_zone(zone): 'deleted', 'deleted_at', 'updated_at')) -def check_encryption_key(func): - def wrapped(*args, **kwargs): - if not FLAGS.build_plan_encryption_key: - raise exception.FlagNotSet(flag='build_plan_encryption_key') - return func(*args, **kwargs) - return wrapped - - class Controller(common.OpenstackController): _serialization_metadata = { @@ -117,7 +109,6 @@ class Controller(common.OpenstackController): zone = api.zone_update(context, zone_id, env["zone"]) return dict(zone=_scrub_zone(zone)) - @check_encryption_key def select(self, req): """Returns a weighted list of costs to create instances of desired capabilities.""" @@ -138,6 +129,9 @@ class Controller(common.OpenstackController): """Remove all the confidential data and return a sanitized version of the build plan. 
Include an encrypted full version of the weighting entry so we can get back to it later.""" + if not FLAGS.build_plan_encryption_key: + raise exception.FlagNotSet(flag='build_plan_encryption_key') + encryptor = crypto.encryptor(FLAGS.build_plan_encryption_key) cooked = [] for entry in build_plan: diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 62a763c6f..fa2e05033 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -121,17 +121,6 @@ class ZonesTest(test.TestCase): FLAGS.zone_capabilities = self.old_zone_capabilities super(ZonesTest, self).tearDown() - def test_check_encryption_key(self): - @zones.check_encryption_key - def test_func(): - return 42 - - self.assertRaises(exception.FlagNotSet, test_func) - - FLAGS.build_plan_encryption_key = "something" - ret = test_func() - self.assertEqual(ret, 42) - def test_get_zone_list_scheduler(self): self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler) req = webob.Request.blank('/v1.0/zones') -- cgit From 79d505c015bff1598e8e896f6198d65d90095ba6 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Wed, 18 May 2011 19:22:53 -0400 Subject: fixup absolute limits to latest 1.1 spec --- nova/api/openstack/limits.py | 2 +- nova/api/openstack/views/limits.py | 11 ++--- nova/compute/api.py | 5 ++- nova/quota.py | 30 ++++++++----- nova/tests/api/openstack/test_limits.py | 10 ++++- nova/tests/test_quota.py | 80 +++++++++++++++++++++++++++------ 6 files changed, 105 insertions(+), 33 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 1411cec01..032a5ff2f 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -66,7 +66,7 @@ class LimitsController(common.OpenstackController): Return all global and rate limit information. 
""" context = req.environ['nova.context'] - abs_limits = quota.get_quota(context, context.project_id) + abs_limits = quota.get_quota(context, context.project_id) rate_limits = req.environ.get("nova.limits", []) builder = self._get_view_builder(req) diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py index 464f91e33..5b34c8ad0 100644 --- a/nova/api/openstack/views/limits.py +++ b/nova/api/openstack/views/limits.py @@ -113,13 +113,14 @@ class ViewBuilderV11(ViewBuilder): """ limit_names = { - "ram": "maxTotalRAMSize", - "instances": "maxTotalInstances", - "cores": "maxTotalCores", - "metadata_items": "maxServerMeta", + "ram": ["maxTotalRAMSize"], + "instances": ["maxTotalInstances"], + "cores": ["maxTotalCores"], + "metadata_items": ["maxServerMeta", "maxImageMeta"], } limits = {} for name, value in absolute_limits.iteritems(): if name in limit_names and value is not None: - limits[limit_names[name]] = value + for name in limit_names[name]: + limits[name] = value return limits diff --git a/nova/compute/api.py b/nova/compute/api.py index a12f8d515..912dd363c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -95,14 +95,15 @@ class API(base.Base): """ if injected_files is None: return - limit = quota.allowed_injected_files(context) + limit = quota.allowed_injected_files(context, len(injected_files)) if len(injected_files) > limit: raise quota.QuotaError(code="OnsetFileLimitExceeded") path_limit = quota.allowed_injected_file_path_bytes(context) - content_limit = quota.allowed_injected_file_content_bytes(context) for path, content in injected_files: if len(path) > path_limit: raise quota.QuotaError(code="OnsetFilePathLimitExceeded") + content_limit = quota.allowed_injected_file_content_bytes( + context, len(content)) if len(content) > content_limit: raise quota.QuotaError(code="OnsetFileContentLimitExceeded") diff --git a/nova/quota.py b/nova/quota.py index 53eb34043..d98249abd 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -55,6 +55,9 @@ def _get_default_quota(): 'gigabytes': FLAGS.quota_gigabytes, 'floating_ips': FLAGS.quota_floating_ips, 'metadata_items': FLAGS.quota_metadata_items, + 'injected_files': FLAGS.quota_max_injected_files, + 'injected_file_content_bytes': + FLAGS.quota_max_injected_file_content_bytes, } # -1 in the quota flags means unlimited for key in defaults.keys(): @@ -128,24 +131,29 @@ def allowed_floating_ips(context, num_floating_ips): return min(num_floating_ips, allowed_floating_ips) +def _calculate_simple_quota(context, resource, requested): + """Check quota for resource; return min(requested, allowed).""" + quota = get_quota(context, context.project_id) + allowed = _get_request_allotment(requested, 0, quota[resource]) + return min(requested, allowed) + + def allowed_metadata_items(context, num_metadata_items): - """Check quota; return min(num_metadata_items,allowed_metadata_items).""" - project_id = context.project_id - context = context.elevated() - quota = get_quota(context, project_id) - allowed_metadata_items = _get_request_allotment(num_metadata_items, 0, - quota['metadata_items']) - return min(num_metadata_items, allowed_metadata_items) + """Return the number of metadata items allowed.""" + return _calculate_simple_quota(context, 'metadata_items', + num_metadata_items) -def allowed_injected_files(context): +def allowed_injected_files(context, num_injected_files): """Return the number of injected files allowed.""" - return FLAGS.quota_max_injected_files + return _calculate_simple_quota(context, 'injected_files', + 
num_injected_files) -def allowed_injected_file_content_bytes(context): +def allowed_injected_file_content_bytes(context, num_bytes): """Return the number of bytes allowed per injected file content.""" - return FLAGS.quota_max_injected_file_content_bytes + resource = 'injected_file_content_bytes' + return _calculate_simple_quota(context, resource, num_bytes) def allowed_injected_file_path_bytes(context): diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 4e411f8fb..7f53bd5c4 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -183,8 +183,10 @@ class LimitsControllerV11Test(BaseLimitTestSuite): BaseLimitTestSuite.setUp(self) self.controller = limits.LimitsControllerV11() self.absolute_limits = {} + def stub_get_quota(context, project_id): return self.absolute_limits + self.stubs.Set(nova.quota, "get_quota", stub_get_quota) def _get_index_request(self, accept_header="application/json"): @@ -304,8 +306,14 @@ class LimitsControllerV11Test(BaseLimitTestSuite): self._test_index_absolute_limits_json({'maxTotalInstances': 19}) def test_index_absolute_metadata_json(self): + # NOTE: both server metadata and image metadata are overloaded + # into metadata_items self.absolute_limits = {'metadata_items': 23} - self._test_index_absolute_limits_json({'maxServerMeta': 23}) + expected = { + 'maxServerMeta': 23, + 'maxImageMeta': 23, + } + self._test_index_absolute_limits_json(expected) class LimitMiddlewareTest(BaseLimitTestSuite): diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 7ace2ad7d..916fca55e 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -104,6 +104,10 @@ class QuotaTestCase(test.TestCase): num_instances = quota.allowed_instances(self.context, 100, self._get_instance_type('m1.small')) self.assertEqual(num_instances, 10) + db.quota_create(self.context, self.project.id, 'ram', 3 * 2048) + num_instances = quota.allowed_instances(self.context, 100, + self._get_instance_type('m1.small')) + self.assertEqual(num_instances, 3) # metadata_items too_many_items = FLAGS.quota_metadata_items + 1000 @@ -120,7 +124,8 @@ class QuotaTestCase(test.TestCase): def test_unlimited_instances(self): FLAGS.quota_instances = 2 - FLAGS.quota_cores = 1000 + FLAGS.quota_ram = -1 + FLAGS.quota_cores = -1 instance_type = self._get_instance_type('m1.small') num_instances = quota.allowed_instances(self.context, 100, instance_type) @@ -133,8 +138,25 @@ class QuotaTestCase(test.TestCase): instance_type) self.assertEqual(num_instances, 101) + def test_unlimited_ram(self): + FLAGS.quota_instances = -1 + FLAGS.quota_ram = 2 * 2048 + FLAGS.quota_cores = -1 + instance_type = self._get_instance_type('m1.small') + num_instances = quota.allowed_instances(self.context, 100, + instance_type) + self.assertEqual(num_instances, 2) + db.quota_create(self.context, self.project.id, 'ram', None) + num_instances = quota.allowed_instances(self.context, 100, + instance_type) + self.assertEqual(num_instances, 100) + num_instances = quota.allowed_instances(self.context, 101, + instance_type) + self.assertEqual(num_instances, 101) + def test_unlimited_cores(self): - FLAGS.quota_instances = 1000 + FLAGS.quota_instances = -1 + FLAGS.quota_ram = -1 FLAGS.quota_cores = 2 instance_type = self._get_instance_type('m1.small') num_instances = quota.allowed_instances(self.context, 100, @@ -150,7 +172,7 @@ class QuotaTestCase(test.TestCase): def test_unlimited_volumes(self): FLAGS.quota_volumes = 10 - 
FLAGS.quota_gigabytes = 1000 + FLAGS.quota_gigabytes = -1 volumes = quota.allowed_volumes(self.context, 100, 1) self.assertEqual(volumes, 10) db.quota_create(self.context, self.project.id, 'volumes', None) @@ -160,7 +182,7 @@ class QuotaTestCase(test.TestCase): self.assertEqual(volumes, 101) def test_unlimited_gigabytes(self): - FLAGS.quota_volumes = 1000 + FLAGS.quota_volumes = -1 FLAGS.quota_gigabytes = 10 volumes = quota.allowed_volumes(self.context, 100, 1) self.assertEqual(volumes, 10) @@ -274,10 +296,47 @@ class QuotaTestCase(test.TestCase): image_id='fake', metadata=metadata) - def test_allowed_injected_files(self): - self.assertEqual( - quota.allowed_injected_files(self.context), - FLAGS.quota_max_injected_files) + def test_default_allowed_injected_files(self): + FLAGS.quota_max_injected_files = 55 + self.assertEqual(quota.allowed_injected_files(self.context, 100), 55) + + def test_overridden_allowed_injected_files(self): + FLAGS.quota_max_injected_files = 5 + db.quota_create(self.context, self.project.id, 'injected_files', 77) + self.assertEqual(quota.allowed_injected_files(self.context, 100), 77) + + def test_unlimited_default_allowed_injected_files(self): + FLAGS.quota_max_injected_files = -1 + self.assertEqual(quota.allowed_injected_files(self.context, 100), 100) + + def test_unlimited_db_allowed_injected_files(self): + FLAGS.quota_max_injected_files = 5 + db.quota_create(self.context, self.project.id, 'injected_files', None) + self.assertEqual(quota.allowed_injected_files(self.context, 100), 100) + + def test_default_allowed_injected_file_content_bytes(self): + FLAGS.quota_max_injected_file_content_bytes = 12345 + limit = quota.allowed_injected_file_content_bytes(self.context, 23456) + self.assertEqual(limit, 12345) + + def test_overridden_allowed_injected_file_content_bytes(self): + FLAGS.quota_max_injected_file_content_bytes = 12345 + db.quota_create(self.context, self.project.id, + 'injected_file_content_bytes', 5678) + limit = quota.allowed_injected_file_content_bytes(self.context, 23456) + self.assertEqual(limit, 5678) + + def test_unlimited_default_allowed_injected_file_content_bytes(self): + FLAGS.quota_max_injected_file_content_bytes = -1 + limit = quota.allowed_injected_file_content_bytes(self.context, 23456) + self.assertEqual(limit, 23456) + + def test_unlimited_db_allowed_injected_file_content_bytes(self): + FLAGS.quota_max_injected_file_content_bytes = 12345 + db.quota_create(self.context, self.project.id, + 'injected_file_content_bytes', None) + limit = quota.allowed_injected_file_content_bytes(self.context, 23456) + self.assertEqual(limit, 23456) def _create_with_injected_files(self, files): api = compute.API(image_service=self.StubImageService()) @@ -304,11 +363,6 @@ class QuotaTestCase(test.TestCase): self.assertRaises(quota.QuotaError, self._create_with_injected_files, files) - def test_allowed_injected_file_content_bytes(self): - self.assertEqual( - quota.allowed_injected_file_content_bytes(self.context), - FLAGS.quota_max_injected_file_content_bytes) - def test_max_injected_file_content_bytes(self): max = FLAGS.quota_max_injected_file_content_bytes content = ''.join(['a' for i in xrange(max)]) -- cgit From 0b698186b56af6580633dedd7916df2897945f29 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 21:31:14 +0900 Subject: Avoid wildcard import. 
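For reference, a minimal sqlalchemy-migrate script written in the explicit-import style this patch switches to might look roughly like the following; the table and column names here are invented for illustration and are not the schema touched by the 019 migration itself.

from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData, String, Table

meta = MetaData()

# Illustrative table only -- not the volume snapshot schema from the real migration.
example = Table('example', meta,
        Column('id', Integer, primary_key=True, nullable=False),
        Column('created_at', DateTime),
        Column('deleted', Boolean, default=False),
        Column('display_name', String(255)))


def upgrade(migrate_engine):
    # sqlalchemy-migrate hands in the live engine; bind the metadata and create.
    meta.bind = migrate_engine
    example.create()


def downgrade(migrate_engine):
    meta.bind = migrate_engine
    example.drop()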
--- .../migrate_repo/versions/019_add_volume_snapshot_support.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py index 288f63e72..5a44bac16 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -15,8 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Column, Table, MetaData +from sqlalchemy import Integer, DateTime, Boolean, String from nova import log as logging -- cgit From a4cc51b78ae5e08227bef7a4be52953776a3e947 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 21:49:15 +0900 Subject: Add a unitest to test EC2 snapshot APIs. --- nova/tests/test_cloud.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) (limited to 'nova') diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index c8559615a..d9169a646 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -188,6 +188,52 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, service1['id']) db.service_destroy(self.context, service2['id']) + def test_describe_snapshots(self): + """Makes sure describe_snapshots works and filters results.""" + vol = db.volume_create(self.context, {}) + snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 2) + snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x') + result = self.cloud.describe_snapshots(self.context, + snapshot_id=[snapshot_id]) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual( + ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']), + snap2['id']) + db.snapshot_destroy(self.context, snap1['id']) + db.snapshot_destroy(self.context, snap2['id']) + db.volume_destroy(self.context, vol['id']) + + def test_create_snapshot(self): + """Makes sure create_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') + + result = self.cloud.create_snapshot(self.context, + volume_id=volume_id) + snapshot_id = result['snapshotId'] + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id) + + db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id)) + db.volume_destroy(self.context, vol['id']) + + def test_delete_snapshot(self): + """Makes sure delete_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.delete_snapshot(self.context, + snapshot_id=snapshot_id) + self.assertTrue(result) + + db.volume_destroy(self.context, vol['id']) + def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', -- cgit From 
c04a59fefbcbd0e5e21cbc8c70eb3147785cf22d Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 22:06:18 +0900 Subject: Fix comments. --- nova/db/api.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index 1ef82b461..3597732b9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -884,27 +884,27 @@ def volume_update(context, volume_id, values): def snapshot_create(context, values): - """Create a volume from the values dictionary.""" + """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): - """Create a volume from the values dictionary.""" + """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): - """Get a volume or raise if it does not exist.""" + """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context): - """Get all volumes.""" + """Get all snapshots.""" return IMPL.snapshot_get_all(context) def snapshot_get_all_by_project(context, project_id): - """Get all volumes belonging to a project.""" + """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id) -- cgit From 10816023a71cca189fb77a1989e3dd542a0e9c25 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Thu, 19 May 2011 14:08:15 -0400 Subject: waldon's naming feedback --- nova/api/openstack/limits.py | 2 +- nova/db/api.py | 2 +- nova/quota.py | 64 +++++++++++++++++---------------- nova/tests/api/openstack/test_limits.py | 5 +-- 4 files changed, 38 insertions(+), 35 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 032a5ff2f..bd0250a7f 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -66,7 +66,7 @@ class LimitsController(common.OpenstackController): Return all global and rate limit information. 
""" context = req.environ['nova.context'] - abs_limits = quota.get_quota(context, context.project_id) + abs_limits = quota.get_project_quotas(context, context.project_id) rate_limits = req.environ.get("nova.limits", []) builder = self._get_view_builder(req) diff --git a/nova/db/api.py b/nova/db/api.py index f341ffeb5..310c0bb09 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -403,7 +403,7 @@ def instance_create(context, values): def instance_data_get_for_project(context, project_id): - """Get (instance_count, core_count, ram_count) for project.""" + """Get (instance_count, total_cores, total_ram) for project.""" return IMPL.instance_data_get_for_project(context, project_id) diff --git a/nova/quota.py b/nova/quota.py index d98249abd..58766e846 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -46,7 +46,7 @@ flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255, 'number of bytes allowed per injected file path') -def _get_default_quota(): +def _get_default_quotas(): defaults = { 'instances': FLAGS.quota_instances, 'cores': FLAGS.quota_cores, @@ -66,8 +66,8 @@ def _get_default_quota(): return defaults -def get_quota(context, project_id): - rval = _get_default_quota() +def get_project_quotas(context, project_id): + rval = _get_default_quotas() quota = db.quota_get_all_by_project(context, project_id) for key in rval.keys(): if key in quota: @@ -81,79 +81,81 @@ def _get_request_allotment(requested, used, quota): return quota - used -def allowed_instances(context, num_instances, instance_type): - """Check quota and return min(num_instances, allowed_instances).""" +def allowed_instances(context, requested_instances, instance_type): + """Check quota and return min(requested_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() - num_cores = num_instances * instance_type['vcpus'] - num_ram = num_instances * instance_type['memory_mb'] + requested_cores = requested_instances * instance_type['vcpus'] + requested_ram = requested_instances * instance_type['memory_mb'] usage = db.instance_data_get_for_project(context, project_id) used_instances, used_cores, used_ram = usage - quota = get_quota(context, project_id) - allowed_instances = _get_request_allotment(num_instances, used_instances, + quota = get_project_quotas(context, project_id) + allowed_instances = _get_request_allotment(requested_instances, + used_instances, quota['instances']) - allowed_cores = _get_request_allotment(num_cores, used_cores, + allowed_cores = _get_request_allotment(requested_cores, used_cores, quota['cores']) - allowed_ram = _get_request_allotment(num_ram, used_ram, quota['ram']) + allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) allowed_instances = min(allowed_instances, allowed_cores // instance_type['vcpus'], allowed_ram // instance_type['memory_mb']) - return min(num_instances, allowed_instances) + return min(requested_instances, allowed_instances) -def allowed_volumes(context, num_volumes, size): - """Check quota and return min(num_volumes, allowed_volumes).""" +def allowed_volumes(context, requested_volumes, size): + """Check quota and return min(requested_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() size = int(size) - num_gigabytes = num_volumes * size + requested_gigabytes = requested_volumes * size used_volumes, used_gigabytes = db.volume_data_get_for_project(context, project_id) - quota = get_quota(context, project_id) - allowed_volumes = _get_request_allotment(num_volumes, used_volumes, + 
quota = get_project_quotas(context, project_id) + allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, quota['volumes']) - allowed_gigabytes = _get_request_allotment(num_gigabytes, used_gigabytes, + allowed_gigabytes = _get_request_allotment(requested_gigabytes, + used_gigabytes, quota['gigabytes']) allowed_volumes = min(allowed_volumes, int(allowed_gigabytes // size)) - return min(num_volumes, allowed_volumes) + return min(requested_volumes, allowed_volumes) -def allowed_floating_ips(context, num_floating_ips): - """Check quota and return min(num_floating_ips, allowed_floating_ips).""" +def allowed_floating_ips(context, requested_floating_ips): + """Check quota and return min(requested, allowed) floating ips.""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) - quota = get_quota(context, project_id) - allowed_floating_ips = _get_request_allotment(num_floating_ips, + quota = get_project_quotas(context, project_id) + allowed_floating_ips = _get_request_allotment(requested_floating_ips, used_floating_ips, quota['floating_ips']) - return min(num_floating_ips, allowed_floating_ips) + return min(requested_floating_ips, allowed_floating_ips) def _calculate_simple_quota(context, resource, requested): """Check quota for resource; return min(requested, allowed).""" - quota = get_quota(context, context.project_id) + quota = get_project_quotas(context, context.project_id) allowed = _get_request_allotment(requested, 0, quota[resource]) return min(requested, allowed) -def allowed_metadata_items(context, num_metadata_items): +def allowed_metadata_items(context, requested_metadata_items): """Return the number of metadata items allowed.""" return _calculate_simple_quota(context, 'metadata_items', - num_metadata_items) + requested_metadata_items) -def allowed_injected_files(context, num_injected_files): +def allowed_injected_files(context, requested_injected_files): """Return the number of injected files allowed.""" return _calculate_simple_quota(context, 'injected_files', - num_injected_files) + requested_injected_files) -def allowed_injected_file_content_bytes(context, num_bytes): +def allowed_injected_file_content_bytes(context, requested_bytes): """Return the number of bytes allowed per injected file content.""" resource = 'injected_file_content_bytes' - return _calculate_simple_quota(context, resource, num_bytes) + return _calculate_simple_quota(context, resource, requested_bytes) def allowed_injected_file_path_bytes(context): diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 7f53bd5c4..5e5ee1420 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -184,10 +184,11 @@ class LimitsControllerV11Test(BaseLimitTestSuite): self.controller = limits.LimitsControllerV11() self.absolute_limits = {} - def stub_get_quota(context, project_id): + def stub_get_project_quotas(context, project_id): return self.absolute_limits - self.stubs.Set(nova.quota, "get_quota", stub_get_quota) + self.stubs.Set(nova.quota, "get_project_quotas", + stub_get_project_quotas) def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" -- cgit From 44d90bd1f77f9b2297879263f74567c502944ba4 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Thu, 19 May 2011 16:10:56 -0400 Subject: don't give instance quota errors with negative values --- nova/compute/api.py | 10 +++++++--- 1 file changed, 
7 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 912dd363c..3590dc83c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -150,9 +150,13 @@ class API(base.Base): pid = context.project_id LOG.warn(_("Quota exceeeded for %(pid)s," " tried to run %(min_count)s instances") % locals()) - raise quota.QuotaError(_("Instance quota exceeded. You can only " - "run %s more instances of this type.") % - num_instances, "InstanceLimitExceeded") + if num_instances <= 0: + message = _("Instance quota exceeded. You cannot run any " + "more instances of this type.") + else: + message = _("Instance quota exceeded. You can only run %s " + "more instances of this type.") % num_instances + raise quota.QuotaError(message, "InstanceLimitExceeded") self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) -- cgit From 99bab1b99bf4388a0dba89300c4fb71095681276 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 19 May 2011 16:25:57 -0400 Subject: Moved back templates and fixed pep8 issue. Template move was due to breaking packaging with template moves. That will need to happen in a later merge. --- nova/virt/cpuinfo.xml.template | 9 +++ nova/virt/libvirt.xml.template | 122 +++++++++++++++++++++++++++++++++ nova/virt/libvirt/connection.py | 5 +- nova/virt/libvirt/cpuinfo.xml.template | 9 --- nova/virt/libvirt/libvirt.xml.template | 122 --------------------------------- nova/virt/libvirt/netutils.py | 4 +- 6 files changed, 135 insertions(+), 136 deletions(-) create mode 100644 nova/virt/cpuinfo.xml.template create mode 100644 nova/virt/libvirt.xml.template delete mode 100644 nova/virt/libvirt/cpuinfo.xml.template delete mode 100644 nova/virt/libvirt/libvirt.xml.template (limited to 'nova') diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template new file mode 100644 index 000000000..48842b29d --- /dev/null +++ b/nova/virt/cpuinfo.xml.template @@ -0,0 +1,9 @@ + + $arch + $model + $vendor + +#for $var in $features + +#end for + diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template new file mode 100644 index 000000000..de2497a76 --- /dev/null +++ b/nova/virt/libvirt.xml.template @@ -0,0 +1,122 @@ + + ${name} + ${memory_kb} + +#if $type == 'lxc' + #set $disk_prefix = '' + #set $disk_bus = '' + exe + /sbin/init +#else if $type == 'uml' + #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + uml + /usr/bin/linux + /dev/ubda +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + linux + /dev/xvda + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + hvm + #end if + #if $getVar('rescue', False) + ${basepath}/kernel.rescue + ${basepath}/ramdisk.rescue + #else + #if $getVar('kernel', None) + ${kernel} + #if $type == 'xen' + ro + #else + root=/dev/vda console=ttyS0 + #end if + #if $getVar('ramdisk', None) + ${ramdisk} + #end if + #else + + #end if + #end if +#end if + + + + + ${vcpus} + +#if $type == 'lxc' + + + + +#else + #if $getVar('rescue', False) + + + + + + + + + + + #else + + + + + + #if $getVar('local', False) + + + + + + #end if + #end if +#end if + +#for $nic in $nics + + + + + + + +#if $getVar('nic.extra_params', False) + ${nic.extra_params} +#end if +#if $getVar('nic.gateway_v6', False) + +#end if + + +#end for + + + + + + + + + + + + + + + + +#if $getVar('vncserver_host', False) + +#end if + + diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 87ba3fec3..94a703954 100644 
--- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -57,7 +57,6 @@ from nova import context from nova import db from nova import exception from nova import flags -from nova import ipv6 from nova import log as logging from nova import utils from nova import vnc @@ -85,7 +84,7 @@ flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt/libvirt.xml.template'), + utils.abspath('virt/libvirt.xml.template'), 'Libvirt XML Template') flags.DEFINE_string('libvirt_type', 'kvm', @@ -108,7 +107,7 @@ flags.DEFINE_string('firewall_driver', 'nova.virt.libvirt.firewall.IptablesFirewallDriver', 'Firewall driver (defaults to iptables)') flags.DEFINE_string('cpuinfo_xml_template', - utils.abspath('virt/libvirt/cpuinfo.xml.template'), + utils.abspath('virt/cpuinfo.xml.template'), 'CpuInfo XML Template (Used only live migration now)') flags.DEFINE_string('live_migration_uri', "qemu+tcp://%s/system", diff --git a/nova/virt/libvirt/cpuinfo.xml.template b/nova/virt/libvirt/cpuinfo.xml.template deleted file mode 100644 index 48842b29d..000000000 --- a/nova/virt/libvirt/cpuinfo.xml.template +++ /dev/null @@ -1,9 +0,0 @@ - - $arch - $model - $vendor - -#for $var in $features - -#end for - diff --git a/nova/virt/libvirt/libvirt.xml.template b/nova/virt/libvirt/libvirt.xml.template deleted file mode 100644 index de2497a76..000000000 --- a/nova/virt/libvirt/libvirt.xml.template +++ /dev/null @@ -1,122 +0,0 @@ - - ${name} - ${memory_kb} - -#if $type == 'lxc' - #set $disk_prefix = '' - #set $disk_bus = '' - exe - /sbin/init -#else if $type == 'uml' - #set $disk_prefix = 'ubd' - #set $disk_bus = 'uml' - uml - /usr/bin/linux - /dev/ubda -#else - #if $type == 'xen' - #set $disk_prefix = 'sd' - #set $disk_bus = 'scsi' - linux - /dev/xvda - #else - #set $disk_prefix = 'vd' - #set $disk_bus = 'virtio' - hvm - #end if - #if $getVar('rescue', False) - ${basepath}/kernel.rescue - ${basepath}/ramdisk.rescue - #else - #if $getVar('kernel', None) - ${kernel} - #if $type == 'xen' - ro - #else - root=/dev/vda console=ttyS0 - #end if - #if $getVar('ramdisk', None) - ${ramdisk} - #end if - #else - - #end if - #end if -#end if - - - - - ${vcpus} - -#if $type == 'lxc' - - - - -#else - #if $getVar('rescue', False) - - - - - - - - - - - #else - - - - - - #if $getVar('local', False) - - - - - - #end if - #end if -#end if - -#for $nic in $nics - - - - - - - -#if $getVar('nic.extra_params', False) - ${nic.extra_params} -#end if -#if $getVar('nic.gateway_v6', False) - -#end if - - -#end for - - - - - - - - - - - - - - - - -#if $getVar('vncserver_host', False) - -#end if - - diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index 9225d8929..4d596078a 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -26,6 +26,7 @@ import IPy from nova import context from nova import db from nova import flags +from nova import ipv6 from nova import utils @@ -46,6 +47,7 @@ def get_ip_version(cidr): net = IPy.IP(cidr) return int(net.version()) + def get_network_info(instance): # TODO(adiantum) If we will keep this function # we should cache network_info @@ -93,5 +95,3 @@ def get_network_info(instance): network_info.append((network, mapping)) return network_info - - -- cgit From b2db9895c271825d1a58ade9c6de85ac90f760a7 Mon Sep 17 00:00:00 2001 From: 
William Wolf Date: Thu, 19 May 2011 22:56:23 -0400 Subject: fixed pep8 issue --- nova/virt/images.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/virt/images.py b/nova/virt/images.py index 8689c0ed3..02c898fda 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -42,6 +42,7 @@ def fetch(image_id, path, _user, _project): metadata = image_service.get(elevated, image_id, image_file) return metadata + # TODO(vish): xenapi should use the glance client code directly instead # of retrieving the image using this method. def image_url(image): -- cgit From 1c485a515b299551c44bd4411d82be1cccf5f4bd Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 20 May 2011 00:24:35 -0400 Subject: add absolute limits support to 1.0 api as well --- nova/api/openstack/views/limits.py | 44 +++++++++++++++------------------ nova/tests/api/openstack/test_limits.py | 31 +++++++++++++++++------ 2 files changed, 43 insertions(+), 32 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py index dd7681db4..33be12c0c 100644 --- a/nova/api/openstack/views/limits.py +++ b/nova/api/openstack/views/limits.py @@ -46,7 +46,26 @@ class ViewBuilder(object): return output def _build_absolute_limits(self, absolute_limits): - raise NotImplementedError() + """Builder for absolute limits + + absolute_limits should be given as a dict of limits. + For example: {"ram": 512, "gigabytes": 1024}. + + """ + limit_names = { + "ram": ["maxTotalRAMSize"], + "instances": ["maxTotalInstances"], + "cores": ["maxTotalCores"], + "metadata_items": ["maxServerMeta", "maxImageMeta"], + "injected_files": ["maxPersonality"], + "injected_file_content_bytes": ["maxPersonalitySize"], + } + limits = {} + for name, value in absolute_limits.iteritems(): + if name in limit_names and value is not None: + for name in limit_names[name]: + limits[name] = value + return limits def _build_rate_limits(self, rate_limits): raise NotImplementedError() @@ -72,9 +91,6 @@ class ViewBuilderV10(ViewBuilder): "resetTime": rate_limit["resetTime"], } - def _build_absolute_limits(self, absolute_limit): - return {} - class ViewBuilderV11(ViewBuilder): """Openstack API v1.1 limits view builder.""" @@ -113,23 +129,3 @@ class ViewBuilderV11(ViewBuilder): "unit": rate_limit["unit"], "next-available": rate_limit["resetTime"], } - - def _build_absolute_limits(self, absolute_limits): - """Builder for absolute limits - - absolute_limits should be given as a dict of limits. - For example: {"ram": 512, "gigabytes": 1024}. 
- - """ - limit_names = { - "ram": ["maxTotalRAMSize"], - "instances": ["maxTotalInstances"], - "cores": ["maxTotalCores"], - "metadata_items": ["maxServerMeta", "maxImageMeta"], - } - limits = {} - for name, value in absolute_limits.iteritems(): - if name in limit_names and value is not None: - for name in limit_names[name]: - limits[name] = value - return limits diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 5e5ee1420..dde4451b4 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -48,6 +48,13 @@ class BaseLimitTestSuite(unittest.TestCase): self.time = 0.0 self.stubs = stubout.StubOutForTesting() self.stubs.Set(limits.Limit, "_get_time", self._get_time) + self.absolute_limits = {} + + def stub_get_project_quotas(context, project_id): + return self.absolute_limits + + self.stubs.Set(nova.quota, "get_project_quotas", + stub_get_project_quotas) def tearDown(self): """Run after each test.""" @@ -106,6 +113,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits(request) + self.absolute_limits = {'ram': 51200, 'instances': 20} response = request.get_response(self.controller) expected = { "limits": { @@ -127,7 +135,10 @@ class LimitsControllerV10Test(BaseLimitTestSuite): "remaining": 5, "unit": "HOUR", }], - "absolute": {}, + "absolute": { + "maxTotalRAMSize": 51200, + "maxTotalInstances": 20, + }, }, } body = json.loads(response.body) @@ -182,13 +193,6 @@ class LimitsControllerV11Test(BaseLimitTestSuite): """Run before each test.""" BaseLimitTestSuite.setUp(self) self.controller = limits.LimitsControllerV11() - self.absolute_limits = {} - - def stub_get_project_quotas(context, project_id): - return self.absolute_limits - - self.stubs.Set(nova.quota, "get_project_quotas", - stub_get_project_quotas) def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -316,6 +320,17 @@ class LimitsControllerV11Test(BaseLimitTestSuite): } self._test_index_absolute_limits_json(expected) + def test_index_absolute_injected_files(self): + self.absolute_limits = { + 'injected_files': 17, + 'injected_file_content_bytes': 86753, + } + expected = { + 'maxPersonality': 17, + 'maxPersonalitySize': 86753, + } + self._test_index_absolute_limits_json(expected) + class LimitMiddlewareTest(BaseLimitTestSuite): """ -- cgit From 2a9774a061dacba85e254e3d46bc52e8caa8e7af Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 20 May 2011 00:33:12 -0400 Subject: fill out the absolute limit tests for limits v1.0 controller --- nova/tests/api/openstack/test_limits.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index dde4451b4..7f941ef17 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -96,6 +96,18 @@ class LimitsControllerV10Test(BaseLimitTestSuite): request.environ["nova.limits"] = _limits return request + def _setup_absolute_limits(self): + self.absolute_limits = { + 'instances': 5, + 'cores': 8, + 'ram': 2**13, + 'volumes': 21, + 'gigabytes': 34, + 'metadata_items': 55, + 'injected_files': 89, + 'injected_file_content_bytes': 144, + } + def test_empty_index_json(self): """Test getting empty limit details in JSON.""" request = self._get_index_request() @@ -113,7 +125,7 
@@ class LimitsControllerV10Test(BaseLimitTestSuite): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits(request) - self.absolute_limits = {'ram': 51200, 'instances': 20} + self._setup_absolute_limits() response = request.get_response(self.controller) expected = { "limits": { @@ -136,8 +148,13 @@ class LimitsControllerV10Test(BaseLimitTestSuite): "unit": "HOUR", }], "absolute": { - "maxTotalRAMSize": 51200, - "maxTotalInstances": 20, + "maxTotalInstances": 5, + "maxTotalCores": 8, + "maxTotalRAMSize": 2**13, + "maxServerMeta": 55, + "maxImageMeta": 55, + "maxPersonality": 89, + "maxPersonalitySize": 144, }, }, } -- cgit From 1faf77248409bac4226822d48cfdd6213be5e24c Mon Sep 17 00:00:00 2001 From: Andrey Brindeyev Date: Fri, 20 May 2011 17:57:04 +0400 Subject: Addressing bug #785763. Usual default for maximum number of DHCP leases in dnsmasq is 150. This prevents instances to obtain IP addresses from DHCP in case we have more than 150 in our network. Adding myself to Authors. --- nova/network/linux_net.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'nova') diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 85c4c278c..1fe77d7f1 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -27,6 +27,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils +from IPy import IP LOG = logging.getLogger("nova.linux_net") @@ -698,6 +699,7 @@ def _dnsmasq_cmd(net): '--listen-address=%s' % net['gateway'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % net['dhcp_start'], + '--dhcp-lease-max=%s' % IP(net['cidr']).len(), '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, '--leasefile-ro'] -- cgit From f8e808504249aaa7a5278f6c91f7d300ba6dc8f8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 20 May 2011 11:21:52 -0700 Subject: synchronize vlan creation --- nova/network/linux_net.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 85c4c278c..8e43c5a67 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -449,6 +449,7 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): ensure_bridge(bridge, interface, net_attrs) +@utils.synchronized('ensure_vlan', external=True) def ensure_vlan(vlan_num): """Create a vlan unless it already exists.""" interface = 'vlan%s' % vlan_num -- cgit From 968523ff49fc9d5aed7182b4084b2d7ec9f567ba Mon Sep 17 00:00:00 2001 From: "Dave Walker (Daviey)" Date: Sat, 21 May 2011 13:00:06 +0100 Subject: When adding a keypair that already exists, give a friendly error and no traceback in nova-api. 
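The practical effect of the patch below is that a duplicate CreateKeyPair request now comes back to the caller as an ordinary EC2 error response with code KeyPairExists instead of being treated as an unexpected error with a traceback in the nova-api log. A rough client-side sketch with boto of handling that response; the connection setup is omitted and the key name handling is invented:

from boto.exception import EC2ResponseError

def create_key_pair_once(conn, name):
    # conn is assumed to be an already-authenticated boto EC2 connection.
    try:
        return conn.create_key_pair(name)
    except EC2ResponseError, err:
        # The API now reports the duplicate explicitly rather than failing.
        if err.code == 'KeyPairExists':
            print 'key pair %s already exists, pick another name' % name
            return None
        raise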
--- nova/api/ec2/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index cd59340bd..4686c32ec 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -338,6 +338,10 @@ class Executor(wsgi.Application): else: return self._error(req, context, type(ex).__name__, unicode(ex)) + except exception.KeyPairExists as ex: + LOG.info(_('KeyPairExists raised: %s'), unicode(ex), + context=context) + return self._error(req, context, type(ex).__name__, unicode(ex)) except Exception as ex: extra = {'environment': req.environ} LOG.exception(_('Unexpected error raised: %s'), unicode(ex), -- cgit From d72815193d64b9dcce974888bef05a18689c0504 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 23 May 2011 10:37:28 -0400 Subject: Fixed mistyped line --- nova/api/openstack/views/limits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py index 22d1c260d..02fa0fdd6 100644 --- a/nova/api/openstack/views/limits.py +++ b/nova/api/openstack/views/limits.py @@ -79,7 +79,7 @@ class ViewBuilderV11(ViewBuilder): # check for existing key for limit in limits: if limit["uri"] == rate_limit["URI"] and \ - limit["regex"] == limit["regex"]: + limit["regex"] == rate_limit["regex"]: _rate_limit_key = limit break -- cgit From fd85e72a4cd05d7298c253d2ae32502c83482623 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Mon, 23 May 2011 11:10:40 -0400 Subject: Added test --- nova/tests/api/openstack/test_limits.py | 51 +++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 45bd4d501..2324934a6 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -263,6 +263,57 @@ class LimitsControllerV11Test(BaseLimitTestSuite): body = json.loads(response.body) self.assertEqual(expected, body) + def _populate_limits_diff_regex(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("GET", "*", "*.*", 10, 60).display(), + ] + request.environ["nova.limits"] = _limits + return request + + def test_index_diff_regex(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits_diff_regex(request) + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": 0, + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + { + "regex": "*.*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": 0, + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + + ], + "absolute": {}, + }, + } + body = json.loads(response.body) + self.assertEqual(expected, body) + class LimitMiddlewareTest(BaseLimitTestSuite): """ -- cgit From ffac2aa8162ba5111a01b495d9dd7e43bfda4af4 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 14:38:37 -0500 Subject: initial fudging in of swap disk --- nova/tests/xenapi/stubs.py | 2 +- nova/virt/xenapi/vm_utils.py | 18 ++++++++++++------ nova/virt/xenapi/vmops.py | 21 +++++++++++++-------- 3 files changed, 26 insertions(+), 15 deletions(-) (limited to 'nova') diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 
4833ccb07..d9306900d 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -37,7 +37,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return vdi_uuid + return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=None) stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9f6cd608c..c24fc7ba6 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -408,18 +408,24 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) - vdi_uuid = session.wait_for_task(task, instance_id) + vdi_uuids = session.wait_for_task(task, instance_id) + primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid') cls.scan_sr(session, instance_id, sr_ref) # Set the name-label to ease debugging - vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) - name_label = get_name_label_for_image(image) - session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) + primary_vdi_ref = session.get_xenapi().VDI.get_by_uuid(primary_vdi_uuid) + primary_name_label = get_name_label_for_image(image) + session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label) - LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s") + LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(primary_vdi_uuid)s") % locals()) - return vdi_uuid + + LOG.debug("=" * 100) + LOG.debug(rimary_vdi_uuid) + LOG.debug(swap_vdi_uuid) + return (primary_vdi_uuid, swap_vdi_uuid) @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0074444f8..4a01cac29 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -109,20 +109,20 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - vdi_uuid = VMHelper.fetch_image(self._session, instance.id, - instance.image_id, user, project, disk_image_type) - return vdi_uuid + (primary_vdi_uuid, swap_vdi_uuid) = VMHelper.fetch_image(self._session, + instance.id, instance.image_id, user, project, disk_image_type) + return (primary_vdi_uuid, swap_vdi_uuid) def spawn(self, instance, network_info=None): - vdi_uuid = self._create_disk(instance) - vm_ref = self._create_vm(instance, vdi_uuid, network_info) + vdi_uuid, swap_uuid = self._create_disk(instance) + vm_ref = self._create_vm(instance, vdi_uuid, swap_uuid, network_info) self._spawn(instance, vm_ref) def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdi_uuid, network_info=None): + def _create_vm(self, instance, vdi_uuid, swap_vdi_uuid=None, network_info=None): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -143,18 +143,20 @@ class VMOps(object): # Are we building from a pre-existing disk? 
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + if swap_vdi_uuid: + swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) + instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)[0] ramdisk = None if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) + instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)[0] use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id, vdi_ref, disk_image_type, instance.os_type) @@ -163,6 +165,9 @@ class VMOps(object): VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=vdi_ref, userdevice=0, bootable=True) + if swap_vdi_uuid: + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, + vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. -- cgit From 94766fac0f5fdb3c7847b1129a8f05948a97f887 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 20:42:54 +0000 Subject: cleanup and fixes --- nova/virt/xenapi/vm_utils.py | 18 ++++++++++-------- nova/virt/xenapi/vmops.py | 40 ++++++++++++++++++++++++---------------- 2 files changed, 34 insertions(+), 24 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c24fc7ba6..f1f7b8249 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -410,7 +410,7 @@ class VMHelper(HelperBase): task = session.async_call_plugin('glance', 'download_vhd', kwargs) vdi_uuids = session.wait_for_task(task, instance_id) primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') - swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid') + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) cls.scan_sr(session, instance_id, sr_ref) @@ -419,13 +419,14 @@ class VMHelper(HelperBase): primary_name_label = get_name_label_for_image(image) session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label) - LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(primary_vdi_uuid)s") - % locals()) + LOG.debug(_("xapi 'download_vhd' returned VDI UUID " + "%(primary_vdi_uuid)s") % locals()) + if swap_vdi_uuid: + LOG.debug(_("xapi 'download_vhd' returned SWAP VDI UUID " + "%(swap_vdi_uuid)s") % locals()) - LOG.debug("=" * 100) - LOG.debug(rimary_vdi_uuid) - LOG.debug(swap_vdi_uuid) - return (primary_vdi_uuid, swap_vdi_uuid) + LOG.debug("=" * 100) + return vdi_uuids @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, @@ -482,7 +483,8 @@ class VMHelper(HelperBase): LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) return filename else: - return session.get_xenapi().VDI.get_uuid(vdi_ref) + vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) + return {'primary_vdi_uuid': vdi_uuid} @classmethod def determine_disk_image_type(cls, instance): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 4a01cac29..0c30ad4cb 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -109,20 +109,21 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - 
(primary_vdi_uuid, swap_vdi_uuid) = VMHelper.fetch_image(self._session, - instance.id, instance.image_id, user, project, disk_image_type) - return (primary_vdi_uuid, swap_vdi_uuid) + vdi_uuids = VMHelper.fetch_image(self._session, + instance.id, instance.image_id, user, project, + disk_image_type) + return vdi_uuids def spawn(self, instance, network_info=None): - vdi_uuid, swap_uuid = self._create_disk(instance) - vm_ref = self._create_vm(instance, vdi_uuid, swap_uuid, network_info) + vdi_uuids = self._create_disk(instance) + vm_ref = self._create_vm(instance, vdi_uuids, network_info) self._spawn(instance, vm_ref) def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdi_uuid, swap_vdi_uuid=None, network_info=None): + def _create_vm(self, instance, vdi_uuids, network_info=None): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -142,30 +143,37 @@ class VMOps(object): project = AuthManager().get_project(instance.project_id) # Are we building from a pre-existing disk? - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdi_uuids['primary_vdi_uuid']) + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) if swap_vdi_uuid: swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) + else: + swap_vdi_ref = None disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)[0] + instance.kernel_id, user, project, + ImageType.KERNEL_RAMDISK) ramdisk = None if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)[0] + instance.ramdisk_id, user, project, + ImageType.KERNEL_RAMDISK) - use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id, - vdi_ref, disk_image_type, instance.os_type) - vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, - use_pv_kernel) + use_pv_kernel = VMHelper.determine_is_pv(self._session, + instance.id, primary_vdi_ref, disk_image_type, + instance.os_type) + vm_ref = VMHelper.create_vm(self._session, instance, kernel, + ramdisk, use_pv_kernel) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=vdi_ref, userdevice=0, bootable=True) - if swap_vdi_uuid: + vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) + if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) @@ -177,7 +185,7 @@ class VMOps(object): # Alter the image before VM start for, e.g. 
network injection if FLAGS.xenapi_inject_image: VMHelper.preconfigure_instance(self._session, instance, - vdi_ref, network_info) + primary_vdi_ref, network_info) self.create_vifs(vm_ref, network_info) self.inject_network_info(instance, network_info, vm_ref) -- cgit From 63dbfeb2cb5b834a0cb4dd23c30522f540ac539b Mon Sep 17 00:00:00 2001 From: "Dave Walker (Daviey)" Date: Mon, 23 May 2011 22:15:10 +0100 Subject: Added test case for attempting to create a duplicate keypair --- nova/tests/test_api.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'nova') diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 97f401b87..7c0331eff 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -224,6 +224,29 @@ class ApiEc2TestCase(test.TestCase): self.manager.delete_project(project) self.manager.delete_user(user) + def test_create_duplicate_key_pair(self): + """Test that, after successfully generating a keypair, + requesting a second keypair with the same name fails sanely""" + self.expect_http() + self.mox.ReplayAll() + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ + for x in range(random.randint(4, 8))) + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + # NOTE(vish): create depends on pool, so call helper directly + self.ec2.create_key_pair('test') + + try: + self.ec2.create_key_pair('test') + except EC2ResponseError, e: + if e.code == 'KeyPairExists': + pass + else: + self.fail("Unexpected EC2ResponseError: %s " + "(expected KeyPairExists)" % e.code) + else: + self.fail('Exception not raised.') + def test_get_all_security_groups(self): """Test that we can retrieve security groups""" self.expect_http() -- cgit From 107eedf06ba6d27e65169302bd51a391e6e104f7 Mon Sep 17 00:00:00 2001 From: "Dave Walker (Daviey)" Date: Mon, 23 May 2011 22:15:41 +0100 Subject: Changed ec2 api dupe key exception log handler info->debug --- nova/api/ec2/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 4686c32ec..c13993dd3 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -339,7 +339,7 @@ class Executor(wsgi.Application): return self._error(req, context, type(ex).__name__, unicode(ex)) except exception.KeyPairExists as ex: - LOG.info(_('KeyPairExists raised: %s'), unicode(ex), + LOG.debug(_('KeyPairExists raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) except Exception as ex: -- cgit From 42c209d90f491d19b3aabc70f8dafc33b76cf20d Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 16:51:28 -0500 Subject: fix tests, have glance plugin return json encoded string of vdi uuids --- nova/tests/xenapi/stubs.py | 11 +++++++++-- nova/virt/xenapi/vm_utils.py | 6 +++++- nova/virt/xenapi/vmops.py | 4 ++-- 3 files changed, 16 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index d9306900d..9f6f64318 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -17,6 +17,7 @@ """Stubouts, mocks and fixtures for the test suite""" import eventlet +import json from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils @@ -37,7 +38,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = 
vdi_rec['uuid'] - return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=None) + return {'primary_vdi_uuid': vdi_uuid} stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) @@ -132,10 +133,16 @@ class FakeSessionForVMTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVMTests, self).__init__(uri) - def host_call_plugin(self, _1, _2, _3, _4, _5): + def host_call_plugin(self, _1, _2, plugin, method, _5): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) + if plugin == "glance" and method == "download_vhd": + swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) + swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) + return '%s' % json.dumps( + {'primary_vdi_uuid': vdi_rec['uuid'], + 'swap_vdi_uuid': swap_vdi_rec['uuid']}) return '%s' % vdi_rec['uuid'] def VM_start(self, _1, ref, _2, _3): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f1f7b8249..3d980013a 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,6 +19,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ +import json import os import pickle import re @@ -408,7 +409,8 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) - vdi_uuids = session.wait_for_task(task, instance_id) + result = session.wait_for_task(task, instance_id) + vdi_uuids = json.loads(result) primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) @@ -571,6 +573,8 @@ class VMHelper(HelperBase): args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) uuid = session.wait_for_task(task, instance_id) + if image_type != ImageType.KERNEL_RAMDISK: + return {'primary_vdi_uuid': uuid} return uuid @classmethod diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0c30ad4cb..0d7ef5fac 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -91,7 +91,7 @@ class VMOps(object): def finish_resize(self, instance, disk_info): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) - vm_ref = self._create_vm(instance, vdi_uuid) + vm_ref = self._create_vm(instance, {'primary_vdi_uuid': vdi_uuid}) self.resize_instance(instance, vdi_uuid) self._spawn(instance, vm_ref) @@ -144,7 +144,7 @@ class VMOps(object): # Are we building from a pre-existing disk? 
primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', - vdi_uuids['primary_vdi_uuid']) + vdi_uuids.get('primary_vdi_uuid')) swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) if swap_vdi_uuid: swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) -- cgit From 038ce7e16ee7ee1afc86ded260c1aa0d40d1e1ad Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 22:52:56 +0000 Subject: swap should use device 1 and rescue use device 2 --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0d7ef5fac..6ff8fd6a4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -175,7 +175,7 @@ class VMOps(object): vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) + vdi_ref=swap_vdi_ref, userdevice=1, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. @@ -711,7 +711,7 @@ class VMOps(object): vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, - vdi_ref, 1, False) + vdi_ref, 2, False) self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) -- cgit From 999d1a4edb1f6992e3bb85e7a45ebd735e8cdb42 Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 24 May 2011 13:19:09 -0700 Subject: Properly reparse flags when adding dynamic flags --- nova/flags.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 32cb6efa8..7304700f1 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] @@ -119,11 +119,11 @@ class FlagValues(gflags.FlagValues): if '__stored_argv' not in self.__dict__: return new_flags = FlagValues(self) - for k in self.__dict__['__dirty']: + for k in self.FlagDict().iterkeys(): new_flags[k] = gflags.FlagValues.__getitem__(self, k) new_flags(self.__dict__['__stored_argv']) - for k in self.__dict__['__dirty']: + for k in new_flags.FlagDict().iterkeys(): setattr(self, k, getattr(new_flags, k)) self.ClearDirty() -- cgit From d8e1f0b6b3ab7a8549773910815b1d2a5d1b8f2f Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 24 May 2011 13:19:09 -0700 Subject: add a test from vish and fix the issues --- nova/flags.py | 1 + nova/tests/test_flags.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 7304700f1..9eaac5596 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -122,6 +122,7 @@ class FlagValues(gflags.FlagValues): for k in self.FlagDict().iterkeys(): new_flags[k] = gflags.FlagValues.__getitem__(self, k) + new_flags.Reset() new_flags(self.__dict__['__stored_argv']) for k in new_flags.FlagDict().iterkeys(): setattr(self, k, getattr(new_flags, k)) diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py index 707300fcf..05319d91f 100644 --- a/nova/tests/test_flags.py +++ b/nova/tests/test_flags.py @@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase): 
self.assert_('runtime_answer' in self.global_FLAGS) self.assertEqual(self.global_FLAGS.runtime_answer, 60) + def test_long_vs_short_flags(self): + flags.DEFINE_string('duplicate_answer_long', 'val', 'desc', + flag_values=self.global_FLAGS) + argv = ['flags_test', '--duplicate_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + + self.assert_('duplicate_answer' not in self.global_FLAGS) + self.assert_(self.global_FLAGS.duplicate_answer_long, 60) + + flags.DEFINE_integer('duplicate_answer', 60, 'desc', + flag_values=self.global_FLAGS) + self.assertEqual(self.global_FLAGS.duplicate_answer, 60) + self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val') + def test_flag_leak_left(self): self.assertEqual(FLAGS.flags_unittest, 'foo') FLAGS.flags_unittest = 'bar' -- cgit From 6be49381fc1c232e99de3e9774fb6c3e5b685fcf Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 24 May 2011 13:19:09 -0700 Subject: make fake_flags set defaults instead of runtime values --- nova/tests/fake_flags.py | 28 ++++++++++++++-------------- nova/tests/real_flags.py | 26 -------------------------- 2 files changed, 14 insertions(+), 40 deletions(-) delete mode 100644 nova/tests/real_flags.py (limited to 'nova') diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 5d7ca98b5..ecefc464a 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -21,24 +21,24 @@ from nova import flags FLAGS = flags.FLAGS flags.DECLARE('volume_driver', 'nova.volume.manager') -FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver' -FLAGS.connection_type = 'fake' -FLAGS.fake_rabbit = True +FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver') +FLAGS['connection_type'].SetDefault('fake') +FLAGS['fake_rabbit'].SetDefault(True) flags.DECLARE('auth_driver', 'nova.auth.manager') -FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver' +FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver') flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('fake_network', 'nova.network.manager') -FLAGS.network_size = 8 -FLAGS.num_networks = 2 -FLAGS.fake_network = True -FLAGS.image_service = 'nova.image.local.LocalImageService' +FLAGS['network_size'].SetDefault(8) +FLAGS['num_networks'].SetDefault(2) +FLAGS['fake_network'].SetDefault(True) +FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService') flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') -FLAGS.num_shelves = 2 -FLAGS.blades_per_shelf = 4 -FLAGS.iscsi_num_targets = 8 -FLAGS.verbose = True -FLAGS.sqlite_db = "tests.sqlite" -FLAGS.use_ipv6 = True +FLAGS['num_shelves'].SetDefault(2) +FLAGS['blades_per_shelf'].SetDefault(4) +FLAGS['iscsi_num_targets'].SetDefault(8) +FLAGS['verbose'].SetDefault(True) +FLAGS['sqlite_db'].SetDefault("tests.sqlite") +FLAGS['use_ipv6'].SetDefault(True) diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py deleted file mode 100644 index 71da04992..000000000 --- a/nova/tests/real_flags.py +++ /dev/null @@ -1,26 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import flags - -FLAGS = flags.FLAGS - -FLAGS.connection_type = 'libvirt' -FLAGS.fake_rabbit = False -FLAGS.fake_network = False -FLAGS.verbose = False -- cgit From f488576ae27f8eb96a04022d0ecd11a28bd15116 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Tue, 24 May 2011 16:44:28 -0400 Subject: Added filtering on image properties --- nova/api/openstack/images.py | 23 +++++++++++++++++++++-- nova/image/fake.py | 4 ++-- nova/image/glance.py | 8 ++++---- nova/tests/api/openstack/fakes.py | 4 ++-- nova/tests/api/openstack/test_images.py | 9 +++++++++ nova/tests/image/test_glance.py | 2 +- 6 files changed, 39 insertions(+), 11 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..755ce8ead 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -28,6 +28,9 @@ from nova.api.openstack.views import images as images_view LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS +SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', + 'size_min', 'size_max'] + class Controller(common.OpenstackController): """Base `wsgi.Controller` for retrieving/displaying images.""" @@ -59,7 +62,8 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object """ context = req.environ['nova.context'] - images = self._image_service.index(context) + filters = self._get_filters(req) + images = self._image_service.index(context, filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=False) for image in images]) @@ -70,11 +74,26 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object. """ context = req.environ['nova.context'] - images = self._image_service.detail(context) + filters = self._get_filters(req) + images = self._image_service.detail(context, filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + for param in req.str_params: + if param in SUPPORTED_FILTERS or param.startswith('property-'): + filters[param] = req.str_params.get(param) + + return filters + def show(self, req, id): """Return detailed information about a specific image. 
diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..8e84c8597 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -52,11 +52,11 @@ class FakeImageService(service.BaseImageService): self.create(None, image) super(FakeImageService, self).__init__() - def index(self, context): + def index(self, context, filters=None): """Returns list of images.""" return copy.deepcopy(self.images.values()) - def detail(self, context): + def detail(self, context, filters=None): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) diff --git a/nova/image/glance.py b/nova/image/glance.py index 193e37273..dec797619 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -58,23 +58,23 @@ class GlanceImageService(service.BaseImageService): else: self.client = client - def index(self, context): + def index(self, context, filters=None): """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` # included so we can filter by user filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed(filters=filters) for image_meta in image_metas: if self._is_image_available(context, image_meta): meta_subset = utils.subset_dict(image_meta, ('id', 'name')) filtered.append(meta_subset) return filtered - def detail(self, context): + def detail(self, context, filters=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed(filters=filters) for image_meta in image_metas: if self._is_image_available(context, image_meta): base_image_meta = self._translate_to_base(image_meta) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index bf51239e6..8e0156afa 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -166,11 +166,11 @@ def stub_out_glance(stubs, initial_fixtures=None): def __init__(self, initial_fixtures): self.fixtures = initial_fixtures or [] - def fake_get_images(self): + def fake_get_images(self, filters=None): return [dict(id=f['id'], name=f['name']) for f in self.fixtures] - def fake_get_images_detailed(self): + def fake_get_images_detailed(self, filters=None): return copy.deepcopy(self.fixtures) def fake_get_image_meta(self, image_id): diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2c329f920..76d4e2f56 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -708,6 +708,15 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) + def test_get_image_request_filters(self): + request =\ + webob.Request.blank('/v1.1/images/detail?status=ACTIVE&name=testname') + filters = images.Controller()._get_filters(request) + expected = {'status': 'ACTIVE', + 'name': 'testname', + } + self.assertDictMatch(expected, filters) + def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 109905ded..6d108d494 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -34,7 +34,7 @@ class StubGlanceClient(object): def get_image_meta(self, image_id): return self.images[image_id] - def 
get_images_detailed(self): + def get_images_detailed(self, filters=None): return self.images.itervalues() def get_image(self, image_id): -- cgit From 36a3b9dc172a109e1f17dbc531a574ebf9e37453 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 21:41:44 +0000 Subject: need to strip newline from openssl stdout data --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0074444f8..8c80ce7b1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1177,7 +1177,7 @@ class SimpleDH(object): err = proc.stderr.read() if err: raise RuntimeError(_('OpenSSL error: %s') % err) - return proc.stdout.read() + return proc.stdout.read().strip('\n') def encrypt(self, text): return self._run_ssl(text, 'enc') -- cgit From 0acbf6d77f02ca0fa3a11e29a55bbb617c33a816 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 22:13:59 +0000 Subject: DHSimple's decrypt needs to append \n when writing to stdin --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 8c80ce7b1..45b04351d 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1171,7 +1171,7 @@ class SimpleDH(object): shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) - proc.stdin.write(text) + proc.stdin.write(text + '\n') proc.stdin.close() proc.wait() err = proc.stderr.read() -- cgit From 26842cba90bd5637bd6aa185b300102ff257d9f1 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 22:39:16 +0000 Subject: move devices back --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6ff8fd6a4..6fff1d494 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -175,7 +175,7 @@ class VMOps(object): vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=swap_vdi_ref, userdevice=1, bootable=False) + vdi_ref=swap_vdi_ref, userdevice=2, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. 
@@ -711,7 +711,7 @@ class VMOps(object): vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, - vdi_ref, 2, False) + vdi_ref, 1, False) self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) -- cgit From d4ac3a309b23875312014abaf3fb8f84d373825a Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 24 May 2011 22:49:42 +0000 Subject: Don't pass a tuple since spawn_n will get the arguments with *args anyway --- nova/compute/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 2dbea8050..a05112afb 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -658,8 +658,8 @@ class API(base.Base): def set_admin_password(self, context, instance_id, password=None): """Set the root/admin password for the given instance.""" - eventlet.spawn_n(self._set_admin_password, (context, instance_id, - password)) + eventlet.spawn_n(self._set_admin_password, context, instance_id, + password) def inject_file(self, context, instance_id): """Write a file to the given instance.""" -- cgit From e0aa1369d8050f023fee1e60b276d44a6298feb9 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 21:09:43 -0700 Subject: instead of the API spawning a greenthread to wait for a host to be picked, the instance to boot, etc for setting the admin password... let's push the admin password down to the scheduler so that compute can just take care of setting the password as a part of the build process. --- nova/api/openstack/servers.py | 5 ++--- nova/compute/api.py | 22 ++++++++++------------ nova/compute/manager.py | 1 + nova/virt/xenapi/vmops.py | 8 ++++++++ 4 files changed, 21 insertions(+), 15 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index fcb630fae..789c69977 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -180,7 +180,8 @@ class Controller(common.OpenstackController): key_name=key_name, key_data=key_data, metadata=env['server'].get('metadata', {}), - injected_files=injected_files) + injected_files=injected_files, + admin_password=password) except quota.QuotaError as error: self._handle_quota_error(error) @@ -190,8 +191,6 @@ class Controller(common.OpenstackController): builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) server['server']['adminPass'] = password - self.compute_api.set_admin_password(context, server['server']['id'], - password) return server def _deserialize_create(self, request): diff --git a/nova/compute/api.py b/nova/compute/api.py index a12b7dee5..3ed138f69 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -134,7 +134,8 @@ class API(base.Base): display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, - injected_files=None): + injected_files=None, + admin_password=None): """Create the number and type of instances requested. Verifies that quota and other arguments are valid. 
@@ -264,7 +265,8 @@ class API(base.Base): "instance_id": instance_id, "instance_type": instance_type, "availability_zone": availability_zone, - "injected_files": injected_files}}) + "injected_files": injected_files, + "admin_password": admin_password}}) for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) @@ -503,14 +505,6 @@ class API(base.Base): raise exception.Error(_("Unable to find host for Instance %s") % instance_id) - def _set_admin_password(self, context, instance_id, password): - """Set the root/admin password for the given instance.""" - host = self._find_host(context, instance_id) - - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "set_admin_password", - "args": {"instance_id": instance_id, "new_pass": password}}) def snapshot(self, context, instance_id, name): """Snapshot the given instance. @@ -665,8 +659,12 @@ class API(base.Base): def set_admin_password(self, context, instance_id, password=None): """Set the root/admin password for the given instance.""" - eventlet.spawn_n(self._set_admin_password, context, instance_id, - password) + host = self._find_host(context, instance_id) + + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "set_admin_password", + "args": {"instance_id": instance_id, "new_pass": password}}) def inject_file(self, context, instance_id): """Write a file to the given instance.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 11565c25e..e124439ed 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -221,6 +221,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_ref.injected_files = kwargs.get('injected_files', []) + instance_ref.admin_password = kwargs.get('admin_password', None) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 45b04351d..a16c6a0d8 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -202,6 +202,13 @@ class VMOps(object): for path, contents in instance.injected_files: LOG.debug(_("Injecting file path: '%s'") % path) self.inject_file(instance, path, contents) + + def _set_admin_password(): + admin_password = instance.admin_password + if admin_password: + LOG.debug(_("Setting admin password")) + self.set_admin_password(instance, admin_password) + # NOTE(armando): Do we really need to do this in virt? 
# NOTE(tr3buchet): not sure but wherever we do it, we need to call # reset_network afterwards @@ -214,6 +221,7 @@ class VMOps(object): LOG.debug(_('Instance %s: booted'), instance_name) timer.stop() _inject_files() + _set_admin_password() return True except Exception, exc: LOG.warn(exc) -- cgit From 9b9f2c40d847e5be3972f51a897332874d704f1e Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 04:48:00 +0000 Subject: pep8 fix in nova/compute/api.py --- nova/compute/api.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 3ed138f69..86cd4514f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -505,7 +505,6 @@ class API(base.Base): raise exception.Error(_("Unable to find host for Instance %s") % instance_id) - def snapshot(self, context, instance_id, name): """Snapshot the given instance. -- cgit From 17abaeafaf3fed2847e4377a16b47771eb663304 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 16:27:28 +0900 Subject: Fix wrong call of the volume api create() --- nova/api/openstack/contrib/volumes.py | 2 +- nova/tests/test_quota.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 18de2ec71..b22bd2846 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -135,7 +135,7 @@ class VolumeController(wsgi.Controller): vol = env['volume'] size = vol['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) - new_volume = self.volume_api.create(context, size, + new_volume = self.volume_api.create(context, size, None, vol.get('display_name'), vol.get('display_description')) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 7ace2ad7d..990068fae 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -228,6 +228,7 @@ class QuotaTestCase(test.TestCase): volume.API().create, self.context, size=10, + snapshot_id=None, name='', description='') for volume_id in volume_ids: @@ -241,6 +242,7 @@ class QuotaTestCase(test.TestCase): volume.API().create, self.context, size=10, + snapshot_id=None, name='', description='') for volume_id in volume_ids: -- cgit From 7139cf1f0cfe9241a1710e5b7c621db569a2fc2d Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 16:37:52 +0900 Subject: Make snapshot_id=None a default value in VolumeManager:create_volume(). It is not a regular case to create a volume from a snapshot. --- nova/volume/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 84085fbd8..b6f0f5eeb 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id, snapshot_id): + def create_volume(self, context, volume_id, snapshot_id=None): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) -- cgit From f3125b3012da7b6429e4e551060498e665c4596e Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 17:51:30 +0900 Subject: Add unittests for cloning volumes. 
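A condensed sketch of the flow the new test case below exercises (illustration only, not part of the patch itself). It assumes an admin context and a VolumeManager wired up the way the test suite's fake flags do it (fake ISCSI driver), and it skips the cleanup the real test performs:

    from nova import context
    from nova import db

    def clone_volume_via_snapshot(volume_manager):
        """Create a volume, snapshot it, then build a new volume from it."""
        ctxt = context.get_admin_context()
        # source volume
        src_id = db.volume_create(ctxt, {'size': 1})['id']
        volume_manager.create_volume(ctxt, src_id)
        # snapshot of the source volume
        snap_id = db.snapshot_create(ctxt, {'volume_id': src_id,
                                            'volume_size': 1})['id']
        volume_manager.create_snapshot(ctxt, src_id, snap_id)
        # destination volume built from that snapshot
        dst_id = db.volume_create(ctxt, {'size': 1,
                                         'snapshot_id': snap_id})['id']
        volume_manager.create_volume(ctxt, dst_id, snap_id)
        return dst_id
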
--- nova/tests/test_cloud.py | 19 +++++++++++++++++++ nova/tests/test_volume.py | 20 +++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index d9169a646..8c7520fe8 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -171,6 +171,25 @@ class CloudTestCase(test.TestCase): db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) + def test_create_volume_from_snapshot(self): + """Makes sure create_volume works when we specify a snapshot.""" + vol = db.volume_create(self.context, {'size': 1}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'volume_size': vol['size'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.create_volume(self.context, + snapshot_id=snapshot_id) + volume_id = result['volumeId'] + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id) + + db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) + db.snapshot_destroy(self.context, snap['id']) + db.volume_destroy(self.context, vol['id']) + def test_describe_availability_zones(self): """Makes sure describe_availability_zones works and filters results.""" service1 = db.service_create(self.context, {'host': 'host1_zones', diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index c66b66959..1c25d601a 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase): self.context = context.get_admin_context() @staticmethod - def _create_volume(size='0'): + def _create_volume(size='0', snapshot_id=None): """Create a volume object.""" vol = {} vol['size'] = size + vol['snapshot_id'] = snapshot_id vol['user_id'] = 'fake' vol['project_id'] = 'fake' vol['availability_zone'] = FLAGS.storage_availability_zone @@ -69,6 +70,23 @@ class VolumeTestCase(test.TestCase): self.context, volume_id) + def test_create_volume_from_snapshot(self): + """Test volume can be created from a snapshot.""" + volume_src_id = self._create_volume() + self.volume.create_volume(self.context, volume_src_id) + snapshot_id = self._create_snapshot(volume_src_id) + self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) + volume_dst_id = self._create_volume(0, snapshot_id) + self.volume.create_volume(self.context, volume_dst_id, snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get(context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), + volume_dst_id).snapshot_id) + + self.volume.delete_volume(self.context, volume_dst_id) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src_id) + def test_too_big_volume(self): """Ensure failure if a too large of a volume is requested.""" # FIXME(vish): validation needs to move into the data layer in -- cgit From d380729b162c8d6120279db74327e61a4942e28f Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 18:02:07 +0900 Subject: Avoid wildcard import. 
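For reference, the change below follows the usual convention of importing only the names a module actually uses instead of star-importing; in rough terms:

    # before: every public name from sqlalchemy and migrate lands in the
    # migration's namespace, and nothing from migrate is actually needed
    from sqlalchemy import *
    from migrate import *

    # after: only the names the migration references, so their origin is clear
    from sqlalchemy import Column, Table, MetaData, Integer
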
--- .../sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py index 0a50123bf..10bd9d5c9 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py @@ -15,8 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Column, Table, MetaData, Integer from nova import log as logging -- cgit From 3d9569147cee2eaa94fc49c55b40f70a72171ebe Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 09:33:51 -0400 Subject: Added test --- nova/tests/api/openstack/test_images.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 76d4e2f56..233419c6d 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -709,11 +709,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) def test_get_image_request_filters(self): - request =\ - webob.Request.blank('/v1.1/images/detail?status=ACTIVE&name=testname') + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') filters = images.Controller()._get_filters(request) expected = {'status': 'ACTIVE', 'name': 'testname', + 'property-test': '3', + } + self.assertDictMatch(expected, filters) + + def test_get_image_request_filters_not_supported(self): + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') + filters = images.Controller()._get_filters(request) + expected = {'status': 'ACTIVE', } self.assertDictMatch(expected, filters) -- cgit From 7387af3ab5a310f7c427f0257e531871f62f398d Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Wed, 25 May 2011 14:57:52 +0000 Subject: Changed the exception type to not require an instance ID --- nova/virt/xenapi/vmops.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 45b04351d..aaf5585b1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -253,7 +253,8 @@ class VMOps(object): instance_name = instance_or_vm.name vm_ref = VMHelper.lookup(self._session, instance_name) if vm_ref is None: - raise exception.InstanceNotFound(instance_id=instance_obj.id) + raise exception.NotFound(_("No opaque_ref could be determined " + "for '%s'.") % instance_or_vm) return vm_ref def _acquire_bootlock(self, vm): -- cgit From 537c5aea298a6c09b3329185c2d0eed77a0a21bd Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Wed, 25 May 2011 12:09:53 -0400 Subject: try out mox for testing image request filters --- nova/tests/api/openstack/test_images.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 233419c6d..e25334732 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -28,6 +28,7 @@ import shutil import tempfile import xml.dom.minidom as 
minidom +import mox import stubout import webob @@ -709,14 +710,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) def test_get_image_request_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE', + 'name': 'testname', + 'property-test': '3'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') - filters = images.Controller()._get_filters(request) - expected = {'status': 'ACTIVE', - 'name': 'testname', - 'property-test': '3', - } - self.assertDictMatch(expected, filters) + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() def test_get_image_request_filters_not_supported(self): request = webob.Request.blank( -- cgit From e4bf97ba29e8e5858f37cedb34e20ccd8e210bae Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 12:24:27 -0400 Subject: Updated tests to use mox pep8 --- nova/api/openstack/images.py | 2 +- nova/tests/api/openstack/test_images.py | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 755ce8ead..553566d58 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -93,7 +93,7 @@ class Controller(common.OpenstackController): filters[param] = req.str_params.get(param) return filters - + def show(self, req, id): """Return detailed information about a specific image. diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index e25334732..f3f0217d6 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -726,12 +726,18 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): mocker.VerifyAll() def test_get_image_request_filters_not_supported(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') - filters = images.Controller()._get_filters(request) - expected = {'status': 'ACTIVE', - } - self.assertDictMatch(expected, filters) + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') -- cgit From ed582a8b86f81140affd88805ba9989b591577cd Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 17:01:20 +0000 Subject: change install_ref.admin_password to instance_ref.admin_pass to match the DB --- nova/compute/manager.py | 2 +- nova/virt/xenapi/vmops.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index e124439ed..0a4064440 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -221,7 +221,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_ref.injected_files = kwargs.get('injected_files', []) - instance_ref.admin_password = 
kwargs.get('admin_password', None) + instance_ref.admin_pass = kwargs.get('admin_password', None) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a16c6a0d8..e2d453d21 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -204,7 +204,7 @@ class VMOps(object): self.inject_file(instance, path, contents) def _set_admin_password(): - admin_password = instance.admin_password + admin_password = instance.admin_pass if admin_password: LOG.debug(_("Setting admin password")) self.set_admin_password(instance, admin_password) -- cgit From b933f90faecaddf7281455f4824577b586e07f0c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 17:55:51 +0000 Subject: updating admin_pass moved down to compute where the password is actually reset. only update if it succeeds. --- nova/api/openstack/servers.py | 1 - nova/virt/xenapi/vmops.py | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 789c69977..5c10fc916 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -607,7 +607,6 @@ class ControllerV10(Controller): def _parse_update(self, context, server_id, inst_dict, update_dict): if 'adminPass' in inst_dict['server']: - update_dict['admin_pass'] = inst_dict['server']['adminPass'] self.compute_api.set_admin_password(context, server_id, inst_dict['server']['adminPass']) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c9396cffe..be6ef48ea 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -466,6 +466,9 @@ class VMOps(object): # Successful return code from password is '0' if resp_dict['returncode'] != '0': raise RuntimeError(resp_dict['message']) + db.instance_update(context.get_admin_context(), + instance['id'], + dict(admin_pass=new_pass)) return resp_dict['message'] def inject_file(self, instance, path, contents): -- cgit From f2507b3cb77538c1434fea485c4861c11ef3f48b Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 19:05:20 +0000 Subject: fix forever looping on a password reset API call --- nova/compute/manager.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0a4064440..d1e01f275 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -406,22 +406,28 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def set_admin_password(self, context, instance_id, new_pass=None): - """Set the root/admin password for an instance on this host.""" + """Set the root/admin password for an instance on this host. + + This is generally only called by API password resets after an + image has been built. 
+ """ + context = context.elevated() if new_pass is None: # Generate a random password new_pass = utils.generate_password(FLAGS.password_length) - while True: + max_tries = 10 + + for i in xrange(max_tries): instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref["id"] instance_state = instance_ref["state"] expected_state = power_state.RUNNING if instance_state != expected_state: - time.sleep(5) - continue + raise exception.Error(_('Instance is not running')) else: try: self.driver.set_admin_password(instance_ref, new_pass) @@ -437,6 +443,12 @@ class ComputeManager(manager.SchedulerDependentManager): except Exception, e: # Catch all here because this could be anything. LOG.exception(e) + if i == max_tries - 1: + # At some point this exception may make it back + # to the API caller, and we don't want to reveal + # too much. The real exception is logged above + raise exception.Error(_('Internal error')) + time.sleep(1) continue @exception.wrap_exception -- cgit From aebbb90f84e8793040c7dd75eb67ae4914186301 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Wed, 25 May 2011 15:51:47 -0400 Subject: pep8 fixes --- nova/tests/api/openstack/test_limits.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 7f941ef17..1bbe96612 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -100,7 +100,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): self.absolute_limits = { 'instances': 5, 'cores': 8, - 'ram': 2**13, + 'ram': 2 ** 13, 'volumes': 21, 'gigabytes': 34, 'metadata_items': 55, @@ -150,7 +150,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): "absolute": { "maxTotalInstances": 5, "maxTotalCores": 8, - "maxTotalRAMSize": 2**13, + "maxTotalRAMSize": 2 ** 13, "maxServerMeta": 55, "maxImageMeta": 55, "maxPersonality": 89, -- cgit From bd0b4b87da9e960042c3d0caf00370ef526ce8b7 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 20:10:25 +0000 Subject: fix test. 
instance is not updated in DB with admin password in the API anymore --- nova/tests/api/openstack/test_servers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index dc8815845..fbde5c9ce 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -774,8 +774,7 @@ class ServersTest(test.TestCase): def server_update(context, id, params): filtered_dict = dict( - display_name='server_test', - admin_pass='bacon', + display_name='server_test' ) self.assertEqual(params, filtered_dict) return filtered_dict -- cgit From c440aecaaacf3caa8683234022bc10836d232971 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 25 May 2011 17:28:10 -0400 Subject: Added params to local and base image service --- nova/image/local.py | 4 ++-- nova/image/service.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/image/local.py b/nova/image/local.py index 918180bae..677d5302b 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -63,7 +63,7 @@ class LocalImageService(service.BaseImageService): images.append(unhexed_image_id) return images - def index(self, context): + def index(self, context, *args, **kwargs): filtered = [] image_metas = self.detail(context) for image_meta in image_metas: @@ -71,7 +71,7 @@ class LocalImageService(service.BaseImageService): filtered.append(meta) return filtered - def detail(self, context): + def detail(self, context, *args, **kwargs): images = [] for image_id in self._ids(): try: diff --git a/nova/image/service.py b/nova/image/service.py index ab6749049..5361cfc89 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -46,7 +46,7 @@ class BaseImageService(object): # the ImageService subclass SERVICE_IMAGE_ATTRS = [] - def index(self, context): + def index(self, context, *args, **kwargs): """List images. :returns: a sequence of mappings with the following signature @@ -55,7 +55,7 @@ class BaseImageService(object): """ raise NotImplementedError - def detail(self, context): + def detail(self, context, *args, **kwargs): """Detailed information about an images. 
:returns: a sequence of mappings with the following signature -- cgit From fdd27860724cd57db6df059a97e98289f88ce6ac Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: add support to rpc for multicall --- nova/rpc.py | 99 +++++++++++++++++++++++++++++++++++++------------- nova/tests/test_rpc.py | 17 +++++++++ 2 files changed, 90 insertions(+), 26 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 2116f22c3..04198a4a6 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -32,8 +32,11 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import queue + from nova import context from nova import exception @@ -131,7 +134,8 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -347,8 +351,9 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) LOG.debug(_('unpacked context: %s'), context_dict) - return context.RequestContext.from_dict(context_dict) + return RpcContext.from_dict(context_dict) def _pack_context(msg, context): @@ -365,26 +370,27 @@ def _pack_context(msg, context): msg.update(context) -def call(context, topic, msg): - """Sends a message on a topic and wait for a response.""" +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + msg_reply(self.msg_id, *args, **kwargs) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" LOG.debug(_('Making asynchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - class WaitMessage(object): - def __call__(self, data, message): - """Acks message and sets result.""" - message.ack() - if data['failure']: - self.result = RemoteError(*data['failure']) - else: - self.result = data['result'] - - wait_msg = WaitMessage() conn = Connection.instance() consumer = DirectConsumer(connection=conn, msg_id=msg_id) + wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) conn = Connection.instance() @@ -392,18 +398,59 @@ def call(context, topic, msg): publisher.send(msg) publisher.close() - try: - consumer.wait(limit=1) - except StopIteration: - pass - consumer.close() - # NOTE(termie): this is a little bit of a change from the original - # non-eventlet code where returning a Failure - # instance from a deferred call is very similar to - # raising an exception - if isinstance(wait_msg.result, Exception): - raise wait_msg.result - return wait_msg.result + return wait_msg + + +class MulticallWaiter(object): + def __init__(self, consumer): + self._consumer = consumer + self._results = queue.Queue() + self._closed = False + + def close(self): + self._closed = True + self._consumer.close() + + def __call__(self, data, message): + """Acks message and sets result.""" + message.ack() + if data['failure']: + 
self._results.put(RemoteError(*data['failure'])) + else: + self._results.put(data['result']) + + def __iter__(self): + return self.wait() + + def wait(self): + # TODO(termie): This is probably really a much simpler issue but am + # trying to solve the problem quickly. This works but + # I'd prefer to dig in and do it the best way later on. + + def _waiter(): + while not self._closed: + try: + self._consumer.wait(limit=1) + except StopIteration: + pass + eventlet.spawn(_waiter) + + while True: + result = self._results.get() + if isinstance(result, Exception): + raise result + if result == None: + self.close() + raise StopIteration + yield result + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + for x in rv: + rv.close() + return x def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 44d7c91eb..92ddfcffc 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,17 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_multicall_succeed_three_times(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + + for x in result: + self.assertEqual(value, x) + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -126,6 +137,12 @@ class TestReceiver(object): LOG.debug(_("Received %s"), context) return context.to_dict() + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value) + context.reply(value) + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" -- cgit From d46c9fffe4fab8f55483c73d3e6ef12116de9bc5 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: make the test more expicit --- nova/tests/test_rpc.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 92ddfcffc..acab3e758 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -56,9 +56,10 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - + i = 0 for x in result: - self.assertEqual(value, x) + self.assertEqual(value + i, x) + i += 1 def test_context_passed(self): """Makes sure a context is passed through rpc call""" @@ -140,8 +141,8 @@ class TestReceiver(object): @staticmethod def echo_three_times(context, value): context.reply(value) - context.reply(value) - context.reply(value) + context.reply(value + 1) + context.reply(value + 2) @staticmethod def fail(context, value): -- cgit From 7622e854ef68fbdbfc531690cf74916301956c8e Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: add commented out unworking code for yield-based returns --- nova/rpc.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 04198a4a6..f43291c4b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -201,6 +201,11 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: + # TODO(termie): re-enable when fix the yielding issue + #if hasattr(rval, 'send'): + # logging.error('rval! 
%s', rval) + # for x in rval: + # msg_reply(msg_id, x, None) msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') -- cgit From b44c1fe9561ee8754137d2700bab295f20a4032b Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Add a connection pool for rpc cast/call Use the same rabbit connection for all topic listening and wait to be notified vs doing a 0.1 second poll for each. --- nova/rpc.py | 96 ++++++++++++++++++++++++++++++++++++++++++--------------- nova/service.py | 21 +++++++------ 2 files changed, 84 insertions(+), 33 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index f43291c4b..62590ca92 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,9 +35,9 @@ from carrot import messaging import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import pools from eventlet import queue - from nova import context from nova import exception from nova import fakerabbit @@ -92,6 +92,11 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() +class Pool(pools.Pool): + def create(self): + return Connection.instance(new=True) + +ConnectionPool = Pool(max_size=20) class Consumer(messaging.Consumer): """Consumer base class. @@ -163,21 +168,9 @@ class AdapterConsumer(Consumer): self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + self.register_callback(self.process_data) - def receive(self, *args, **kwargs): - self.pool.spawn_n(self._receive, *args, **kwargs) - - @exception.wrap_exception - def _receive(self, message_data, message): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - - """ + def process_data(self, message_data, message): LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -194,6 +187,19 @@ class AdapterConsumer(Consumer): LOG.warn(_('no method for message: %s') % message_data) msg_reply(msg_id, _('No method for message: %s') % message_data) return + self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) + + @exception.wrap_exception + def _process_data(self, msg_id, ctxt, method, args): + """Magically looks for a method on the proxy object and calls it. 
+ + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) @@ -214,11 +220,6 @@ class AdapterConsumer(Consumer): return -class Publisher(messaging.Publisher): - """Publisher base class.""" - pass - - class TopicAdapterConsumer(AdapterConsumer): """Consumes messages on a specific topic.""" @@ -251,6 +252,50 @@ class FanoutAdapterConsumer(AdapterConsumer): topic=topic, proxy=proxy) +class ConsumerSet(object): + """Groups consumers to listen on together on a single connection""" + + def __init__(self, conn, consumer_list): + self.consumer_list = set(consumer_list) + self.consumer_set = None + self.init(conn) + + def init(self, conn): + if not conn: + conn = Connection.instance(new=True) + if self.consumer_set: + self.consumer_set.close() + self.consumer_set = messaging.ConsumerSet(conn) + for consumer in self.consumer_list: + consumer.connection = conn + # consumer.backend is set for us + self.consumer_set.add_consumer(consumer) + + def reconnect(self): + self.init(None) + + def wait(self, limit=None): + while True: + it = self.consumer_set.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + except Exception as e: + LOG.error(_("Received exception %s " % str(e) + \ + "while processing consumer")) + fuck + self.reconnect() + # Break to outer loop + break + + +class Publisher(messaging.Publisher): + """Publisher base class.""" + pass + + class TopicPublisher(Publisher): """Publishes messages on a specific topic.""" @@ -315,7 +360,7 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -324,7 +369,9 @@ def msg_reply(msg_id, reply=None, failure=None): {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), 'failure': failure}) + publisher.close() + ConnectionPool.put(conn) class RemoteError(exception.Error): @@ -393,12 +440,11 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() consumer = DirectConsumer(connection=conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() @@ -462,10 +508,11 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def fanout_cast(context, topic, msg): @@ -511,6 +558,7 @@ def send_message(topic, message, wait=True): if wait: consumer.wait() + consumer.close() if __name__ == '__main__': diff --git a/nova/service.py b/nova/service.py index ab1238c3b..7761cfef5 100644 --- a/nova/service.py +++ b/nova/service.py @@ -91,26 +91,29 @@ class Service(object): if 'nova-compute' == self.binary: 
self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - conn3 = rpc.Connection.instance(new=True) + if self.report_interval: + conn = rpc.Connection.instance(new=True) + + # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, + connection=conn, topic=self.topic, proxy=self) consumer_node = rpc.TopicAdapterConsumer( - connection=conn2, + connection=conn, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn3, + connection=conn, topic=self.topic, proxy=self) - self.timers.append(consumer_all.attach_to_eventlet()) - self.timers.append(consumer_node.attach_to_eventlet()) - self.timers.append(fanout.attach_to_eventlet()) + cset = rpc.ConsumerSet(conn, [consumer_all, + consumer_node, + fanout]) + # Wait forever, processing these consumers + greenthread.spawn_n(cset.wait) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) -- cgit From e1a47584cc63136280cf3ca9ef02da3efc1dff7f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: pep8 and comment fixes --- nova/rpc.py | 25 ++++++++++++++++--------- nova/service.py | 1 - 2 files changed, 16 insertions(+), 10 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 62590ca92..db5aec826 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -92,12 +92,16 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + class Pool(pools.Pool): + """Class that implements a Pool of Connections""" + def create(self): return Connection.instance(new=True) ConnectionPool = Pool(max_size=20) + class Consumer(messaging.Consumer): """Consumer base class. @@ -171,6 +175,16 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): + """Consumer callback that parses the message for validity and + fires off a thread to call the proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -191,14 +205,8 @@ class AdapterConsumer(Consumer): @exception.wrap_exception def _process_data(self, msg_id, ctxt, method, args): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - + """Thread that maigcally looks for a method on the proxy + object and calls it. 
""" node_func = getattr(self.proxy, str(method)) @@ -285,7 +293,6 @@ class ConsumerSet(object): except Exception as e: LOG.error(_("Received exception %s " % str(e) + \ "while processing consumer")) - fuck self.reconnect() # Break to outer loop break diff --git a/nova/service.py b/nova/service.py index 7761cfef5..c51c9b066 100644 --- a/nova/service.py +++ b/nova/service.py @@ -91,7 +91,6 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - if self.report_interval: conn = rpc.Connection.instance(new=True) -- cgit From d0be426d4e7bbfb1ecb3f078c71c1e176da441a5 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: convert fanout_cast to ConnectionPool --- nova/rpc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index db5aec826..fdb228695 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -526,10 +526,11 @@ def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = FanoutPublisher(topic, connection=conn) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def generic_response(message_data, message): -- cgit From f2c2a593c828fc86e298d3eb31672a09b498c41f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: fakerabbit's declare_consumer should support more than 1 consumer. also: make fakerabbit Backend.consume be an iterator like it should be.. --- nova/fakerabbit.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'nova') diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a7dee8caf..a29ba9d86 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -77,6 +77,10 @@ class Queue(object): class Backend(base.BaseBackend): + def __init__(self, connection, **kwargs): + super(Backend, self).__init__(connection, **kwargs) + self.consumers = [] + def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: @@ -97,16 +101,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback + self.consumers.append((queue, callback)) def consume(self, limit=None): + num = 0 while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) + for (queue, callback) in self.consumers: + item = self.get(queue) + if item: + callback(item) + num += 1 + yield + if limit and num == limit: + raise StopIteration() + greenthread.sleep(0.1) def get(self, queue, no_ack=False): global QUEUES -- cgit From 90e30806a2e0c235612eb09792656cd861997f84 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: fix consumers to actually be deleted and clean up cloud test --- nova/fakerabbit.py | 13 +++++++++---- nova/rpc.py | 13 ++++++++++--- nova/service.py | 8 +++----- nova/tests/test_cloud.py | 26 ++++++++++---------------- 4 files changed, 32 insertions(+), 28 deletions(-) (limited to 'nova') diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a29ba9d86..5f3e75c48 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -79,7 +79,7 @@ class Queue(object): class 
Backend(base.BaseBackend): def __init__(self, connection, **kwargs): super(Backend, self).__init__(connection, **kwargs) - self.consumers = [] + self.consumers = {} def queue_declare(self, queue, **kwargs): global QUEUES @@ -100,13 +100,18 @@ class Backend(base.BaseBackend): ' key %(routing_key)s') % locals()) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) - def declare_consumer(self, queue, callback, *args, **kwargs): - self.consumers.append((queue, callback)) + def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + LOG.debug("Adding consumer %s", consumer_tag) + self.consumers[consumer_tag] = (queue, callback) + + def cancel(self, consumer_tag): + LOG.debug("Removing consumer %s", consumer_tag) + del self.consumers[consumer_tag] def consume(self, limit=None): num = 0 while True: - for (queue, callback) in self.consumers: + for (queue, callback) in self.consumers.itervalues(): item = self.get(queue) if item: callback(item) diff --git a/nova/rpc.py b/nova/rpc.py index fdb228695..e2e962fcc 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -30,11 +30,11 @@ import time import traceback import uuid +import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -266,6 +266,7 @@ class ConsumerSet(object): def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None + self.enabled = True self.init(conn) def init(self, conn): @@ -283,15 +284,21 @@ class ConsumerSet(object): self.init(None) def wait(self, limit=None): - while True: + running = True + while running: it = self.consumer_set.iterconsume(limit=limit) + if not it: + break while True: try: it.next() except StopIteration: return + except greenlet.GreenletExit: + running = False + break except Exception as e: - LOG.error(_("Received exception %s " % str(e) + \ + LOG.error(_("Received exception %s " % type(e) + \ "while processing consumer")) self.reconnect() # Break to outer loop diff --git a/nova/service.py b/nova/service.py index c51c9b066..a0ff7c9f3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,12 +21,8 @@ import inspect import os -import sys -import time -from eventlet import event from eventlet import greenthread -from eventlet import greenpool from nova import context from nova import db @@ -112,7 +108,7 @@ class Service(object): consumer_node, fanout]) # Wait forever, processing these consumers - greenthread.spawn_n(cset.wait) + self.csetthread = greenthread.spawn(cset.wait) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) @@ -169,6 +165,8 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" + self.csetthread.kill() + self.csetthread.wait() self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..1e14c327c 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -17,13 +17,8 @@ # under the License. 
from base64 import b64decode -import json from M2Crypto import BIO from M2Crypto import RSA -import os -import shutil -import tempfile -import time from eventlet import greenthread @@ -33,12 +28,10 @@ from nova import db from nova import flags from nova import log as logging from nova import rpc -from nova import service from nova import test from nova import utils from nova import exception from nova.auth import manager -from nova.compute import power_state from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import local @@ -79,6 +72,15 @@ class CloudTestCase(test.TestCase): self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish + rpc_cast = rpc.cast + + def finish_cast(*args, **kwargs): + rpc_cast(*args, **kwargs) + greenthread.sleep(0.2) + + self.stubs.Set(rpc, 'cast', finish_cast) + def tearDown(self): network_ref = db.project_get_network(self.context, self.project.id) @@ -113,7 +115,6 @@ class CloudTestCase(test.TestCase): self.cloud.describe_addresses(self.context) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) db.floating_ip_destroy(self.context, address) def test_associate_disassociate_address(self): @@ -129,12 +130,10 @@ class CloudTestCase(test.TestCase): self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) - greenthread.sleep(0.3) self.cloud.disassociate_address(self.context, public_ip=address) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) self.network.deallocate_fixed_ip(self.context, fixed) db.instance_destroy(self.context, inst['id']) db.floating_ip_destroy(self.context, address) @@ -306,31 +305,26 @@ class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) - greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_ajax_console(self): + kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - greenthread.sleep(0.3) output = self.cloud.get_ajax_console(context=self.context, instance_id=[instance_id]) self.assertEquals(output['url'], '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_key_generation(self): result = self._create_key('test') -- cgit From 8f2557dcd3e3d88c0eabb63bcce90ced79347ae4 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: catch greenlet.GreenletExit when shutting service down --- nova/rpc.py | 2 +- nova/service.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index e2e962fcc..02052ecf5 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,13 +24,13 @@ No fan-out support yet. 
""" +import greenlet import json import sys import time import traceback import uuid -import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet diff --git a/nova/service.py b/nova/service.py index a0ff7c9f3..c7e48544c 100644 --- a/nova/service.py +++ b/nova/service.py @@ -19,6 +19,7 @@ """Generic Node baseclass for all workers that run on hosts.""" +import greenlet import inspect import os @@ -166,7 +167,10 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" self.csetthread.kill() - self.csetthread.wait() + try: + self.csetthread.wait() + except greenlet.GreenletExit: + pass self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) -- cgit From 5f3adfc3110ed8095cdac43cc651aa46087c5490 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Always create Service consumers no matter if report_interval is 0 Fix tests to handle how Service loads Consumers now --- nova/service.py | 46 +++++++++++++++++++------------------ nova/tests/test_service.py | 57 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 75 insertions(+), 28 deletions(-) (limited to 'nova') diff --git a/nova/service.py b/nova/service.py index c7e48544c..3a364b6c6 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,29 +88,31 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - if self.report_interval: - conn = rpc.Connection.instance(new=True) - - # Share this same connection for these Consumers - consumer_all = rpc.TopicAdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - consumer_node = rpc.TopicAdapterConsumer( - connection=conn, - topic='%s.%s' % (self.topic, self.host), - proxy=self) - fanout = rpc.FanoutAdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - - cset = rpc.ConsumerSet(conn, [consumer_all, - consumer_node, - fanout]) - # Wait forever, processing these consumers - self.csetthread = greenthread.spawn(cset.wait) + conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % \ + self.topic) + + # Share this same connection for these Consumers + consumer_all = rpc.TopicAdapterConsumer( + connection=conn, + topic=self.topic, + proxy=self) + consumer_node = rpc.TopicAdapterConsumer( + connection=conn, + topic='%s.%s' % (self.topic, self.host), + proxy=self) + fanout = rpc.FanoutAdapterConsumer( + connection=conn, + topic=self.topic, + proxy=self) + + cset = rpc.ConsumerSet(conn, [consumer_all, + consumer_node, + fanout]) + # Wait forever, processing these consumers + self.csetthread = greenthread.spawn(cset.wait) + if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) self.timers.append(pulse) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index d48de2057..0bba01d92 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -106,7 +106,10 @@ class ServiceTestCase(test.TestCase): # NOTE(vish): Create was moved out of mox replay to make sure that # the looping calls are created in StartService. 
- app = service.Service.create(host=host, binary=binary) + app = service.Service.create(host=host, binary=binary, topic=topic) + + self.mox.StubOutWithMock(service.rpc.Connection, 'instance') + service.rpc.Connection.instance(new=mox.IgnoreArg()) self.mox.StubOutWithMock(rpc, 'TopicAdapterConsumer', @@ -114,6 +117,11 @@ class ServiceTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'FanoutAdapterConsumer', use_mock_anything=True) + + self.mox.StubOutWithMock(rpc, + 'ConsumerSet', + use_mock_anything=True) + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), topic=topic, proxy=mox.IsA(service.Service)).AndReturn( @@ -129,9 +137,13 @@ class ServiceTestCase(test.TestCase): proxy=mox.IsA(service.Service)).AndReturn( rpc.FanoutAdapterConsumer) - rpc.TopicAdapterConsumer.attach_to_eventlet() - rpc.TopicAdapterConsumer.attach_to_eventlet() - rpc.FanoutAdapterConsumer.attach_to_eventlet() + def wait_func(self, limit=None): + return None + + mock_cset = self.mox.CreateMock(rpc.ConsumerSet, + {'wait': wait_func}) + rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + wait_func(mox.IgnoreArg()) service_create = {'host': host, 'binary': binary, @@ -287,8 +299,41 @@ class ServiceTestCase(test.TestCase): # Creating mocks self.mox.StubOutWithMock(service.rpc.Connection, 'instance') service.rpc.Connection.instance(new=mox.IgnoreArg()) - service.rpc.Connection.instance(new=mox.IgnoreArg()) - service.rpc.Connection.instance(new=mox.IgnoreArg()) + + self.mox.StubOutWithMock(rpc, + 'TopicAdapterConsumer', + use_mock_anything=True) + self.mox.StubOutWithMock(rpc, + 'FanoutAdapterConsumer', + use_mock_anything=True) + + self.mox.StubOutWithMock(rpc, + 'ConsumerSet', + use_mock_anything=True) + + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.TopicAdapterConsumer) + + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), + topic='%s.%s' % (topic, host), + proxy=mox.IsA(service.Service)).AndReturn( + rpc.TopicAdapterConsumer) + + rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.FanoutAdapterConsumer) + + def wait_func(self, limit=None): + return None + + mock_cset = self.mox.CreateMock(rpc.ConsumerSet, + {'wait': wait_func}) + rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + wait_func(mox.IgnoreArg()) + self.mox.StubOutWithMock(serv.manager.driver, 'update_available_resource') serv.manager.driver.update_available_resource(mox.IgnoreArg(), host) -- cgit From 11d3672ad655c39265e5d2477a30db3a12adc65c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Add rpc_conn_pool_size flag for the new connection pool --- nova/rpc.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 02052ecf5..82869fc46 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -50,7 +50,10 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -99,7 +102,7 @@ class Pool(pools.Pool): def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=20) +ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) class 
Consumer(messaging.Consumer): -- cgit From b193b97054f11664a72cd53547f355d1c9044f88 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: connection pool tests and make the pool LIFO --- nova/rpc.py | 8 +++++++- nova/tests/test_rpc.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 82869fc46..3cc0dadd4 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -99,10 +99,16 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): """Class that implements a Pool of Connections""" + # TODO(comstud): Timeout connections not used in a while def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) +# Create a ConnectionPool to use for RPC calls. We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) class Consumer(messaging.Consumer): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index acab3e758..f64209596 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -120,6 +120,48 @@ class RpcTestCase(test.TestCase): "value": value}}) self.assertEqual(value, result) + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection""" + + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_connectionpool_double(self): + """Test that ConnectionPool returns 2 separate connections + when called consecutively and the pool returns connections LIFO + """ + + conn1 = rpc.ConnectionPool.get() + conn2 = rpc.ConnectionPool.get() + + self.assertNotEqual(conn1, conn2) + rpc.ConnectionPool.put(conn1) + rpc.ConnectionPool.put(conn2) + + conn3 = rpc.ConnectionPool.get() + conn4 = rpc.ConnectionPool.get() + self.assertEqual(conn2, conn3) + self.assertEqual(conn1, conn4) + + def test_connectionpool_limit(self): + """Test connection pool limit and verify all connections + are unique + """ + + max_size = FLAGS.rpc_conn_pool_size + conns = [] + + for i in xrange(max_size): + conns.append(rpc.ConnectionPool.get()) + + self.assertFalse(rpc.ConnectionPool.free_items) + self.assertEqual(rpc.ConnectionPool.current_size, + rpc.ConnectionPool.max_size) + self.assertEqual(len(set(conns)), max_size) + class TestReceiver(object): """Simple Proxy class so the consumer has methods to call -- cgit From 51e8eeb9b3a23f811bcbf52d9700d94c5c8b15e4 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: bring back commits lost in merge --- nova/rpc.py | 107 +++++++++++++++++++++++++++++-------------------- nova/tests/test_rpc.py | 19 +++++++++ 2 files changed, 82 insertions(+), 44 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 3cc0dadd4..d7d7bb014 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,6 +35,7 @@ from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool +from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -140,30 +141,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some 
logic for failed connection.""" - # TODO(vish): the logic for failed connections and logging should be - # refactored into some sort of connection manager object - try: - if self.failed_connection: - # NOTE(vish): connection is defined in the parent class, we can - # recreate it as long as we create the backend too - # pylint: disable=W0201 - self.connection = Connection.recreate() - self.backend = self.connection.create_backend() - self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) - if self.failed_connection: - LOG.error(_('Reconnected to queue')) - self.failed_connection = False - # NOTE(vish): This is catching all errors because we really don't - # want exceptions to be logged 10 times a second if some - # persistent failure occurs. - except Exception, e: # pylint: disable=W0703 - if not self.failed_connection: - LOG.exception(_('Failed to fetch message from queue: %s' % e)) - self.failed_connection = True + #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + # """Wraps the parent fetch with some logic for failed connection.""" + # # TODO(vish): the logic for failed connections and logging should be + # # refactored into some sort of connection manager object + # try: + # if self.failed_connection: + # # NOTE(vish): connection is defined in the parent class, we can + # # recreate it as long as we create the backend too + # # pylint: disable=W0201 + # self.connection = Connection.recreate() + # self.backend = self.connection.create_backend() + # self.declare() + # return super(Consumer, self).fetch( + # no_ack, auto_ack, enable_callbacks) + # if self.failed_connection: + # LOG.error(_('Reconnected to queue')) + # self.failed_connection = False + # # NOTE(vish): This is catching all errors because we really don't + # # want exceptions to be logged 10 times a second if some + # # persistent failure occurs. + # except Exception, e: # pylint: disable=W0703 + # if not self.failed_connection: + # LOG.exception(_('Failed to fetch message from queue: %s' % e)) + # self.failed_connection = True def attach_to_eventlet(self): """Only needed for unit tests!""" @@ -195,7 +196,7 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) - msg_id = message_data.pop('_msg_id', None) + msg_id = message_data.get('_msg_id', None) ctxt = _unpack_context(message_data) @@ -225,11 +226,14 @@ class AdapterConsumer(Consumer): rval = node_func(context=ctxt, **node_args) if msg_id: # TODO(termie): re-enable when fix the yielding issue - #if hasattr(rval, 'send'): - # logging.error('rval! %s', rval) - # for x in rval: - # msg_reply(msg_id, x, None) - msg_reply(msg_id, rval, None) + if hasattr(rval, 'send'): + logging.error('rval! 
%s', rval) + for x in rval: + msg_reply(msg_id, x, None) + msg_reply(msg_id, None, None) + else: + msg_reply(msg_id, rval, None) + #msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -355,7 +359,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = True + self.exclusive = False super(DirectConsumer, self).__init__(connection=connection) @@ -387,7 +391,9 @@ def msg_reply(msg_id, reply=None, failure=None): publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) + LOG.error('MSG REPLY SUCCESS') except TypeError: + LOG.error('MSG REPLY FAILURE') publisher.send( {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), @@ -440,9 +446,9 @@ def _pack_context(msg, context): for args at some point. """ - context = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) - msg.update(context) + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) class RpcContext(context.RequestContext): @@ -463,12 +469,13 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = ConnectionPool.get() - consumer = DirectConsumer(connection=conn, msg_id=msg_id) + con_conn = ConnectionPool.get() + consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - publisher = TopicPublisher(connection=conn, topic=topic) + pub_conn = ConnectionPool.get() + publisher = TopicPublisher(connection=pub_conn, topic=topic) publisher.send(msg) publisher.close() @@ -484,6 +491,7 @@ class MulticallWaiter(object): def close(self): self._closed = True self._consumer.close() + ConnectionPool.put(self._consumer.connection) def __call__(self, data, message): """Acks message and sets result.""" @@ -501,15 +509,26 @@ class MulticallWaiter(object): # trying to solve the problem quickly. This works but # I'd prefer to dig in and do it the best way later on. 
- def _waiter(): - while not self._closed: - try: - self._consumer.wait(limit=1) - except StopIteration: - pass - eventlet.spawn(_waiter) + #def _waiter(): + # i = 0 + # while not self._closed: + # LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag) + # i += 1 + # try: + # self._consumer.wait(limit=1) + # except StopIteration: + # pass + # self._consumer.close() + # ConnectionPool.put(self._consumer.connection) + #eventlet.spawn(_waiter) while True: + rv = None + while rv is None and not self._closed: + rv = self._consumer.fetch(enable_callbacks=True) + time.sleep(0.01) + + LOG.error('RV %s', rv) result = self._results.get() if isinstance(result, Exception): raise result diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index f64209596..e5d99474d 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -61,6 +61,18 @@ class RpcTestCase(test.TestCase): self.assertEqual(value + i, x) i += 1 + def test_multicall_succeed_three_times_yield(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + i = 0 + for x in result: + self.assertEqual(value + i, x) + i += 1 + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -83,6 +95,7 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) + LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', @@ -186,6 +199,12 @@ class TestReceiver(object): context.reply(value + 1) context.reply(value + 2) + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" -- cgit From 64b13a2aad676d2310947e3bf8b9e3dde6b763e7 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: almost everything working with fake_rabbit --- nova/rpc.py | 16 +++++++++++++++- nova/service.py | 22 ++++++++++++++++------ nova/test.py | 1 + nova/tests/test_cloud.py | 4 ++-- 4 files changed, 34 insertions(+), 9 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index d7d7bb014..e1f594a99 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -102,6 +102,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): + LOG.debug('Creating new connection') return Connection.instance(new=True) # Create a ConnectionPool to use for RPC calls. 
We'll order the @@ -166,6 +167,10 @@ class Consumer(messaging.Consumer): # LOG.exception(_('Failed to fetch message from queue: %s' % e)) # self.failed_connection = True + def close(self, *args, **kwargs): + LOG.debug('Closing consumer %s', self.consumer_tag) + return super(Consumer, self).close(*args, **kwargs) + def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -317,6 +322,8 @@ class ConsumerSet(object): # Break to outer loop break + def close(self): + self.consumer_set.close() class Publisher(messaging.Publisher): """Publisher base class.""" @@ -525,12 +532,19 @@ class MulticallWaiter(object): while True: rv = None while rv is None and not self._closed: - rv = self._consumer.fetch(enable_callbacks=True) + try: + rv = self._consumer.fetch(enable_callbacks=True) + except Exception: + self.close() + raise + #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) LOG.error('RV %s', rv) result = self._results.get() + LOG.error('RESULT %s', result) if isinstance(result, Exception): + self.close() raise result if result == None: self.close() diff --git a/nova/service.py b/nova/service.py index 3a364b6c6..2da510140 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,29 +88,39 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - conn = rpc.Connection.instance(new=True) + conn1 = rpc.Connection.instance(new=True) + conn2 = rpc.Connection.instance(new=True) + conn3 = rpc.Connection.instance(new=True) logging.debug("Creating Consumer connection for Service %s" % \ self.topic) # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn, + connection=conn1, topic=self.topic, proxy=self) consumer_node = rpc.TopicAdapterConsumer( - connection=conn, + connection=conn1, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn, + connection=conn1, topic=self.topic, proxy=self) - cset = rpc.ConsumerSet(conn, [consumer_all, + cset = rpc.ConsumerSet(conn1, [consumer_all, consumer_node, fanout]) # Wait forever, processing these consumers - self.csetthread = greenthread.spawn(cset.wait) + def _wait(): + cset.wait() + cset.close() + + self.csetthread = greenthread.spawn(_wait) + + #self.timers.append(consumer_all.attach_to_eventlet()) + #self.timers.append(consumer_node.attach_to_eventlet()) + #self.timers.append(fanout.attach_to_eventlet()) if self.report_interval: pulse = utils.LoopingCall(self.report_state) diff --git a/nova/test.py b/nova/test.py index 4deb2a175..7b2cf94b6 100644 --- a/nova/test.py +++ b/nova/test.py @@ -85,6 +85,7 @@ class TestCase(unittest.TestCase): self._monkey_patch_attach() self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() + rpc.ConnectionPool = rpc.Pool(max_size=30) def tearDown(self): """Runs after each test method to tear down test environment.""" diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1e14c327c..a838dd530 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,8 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - self.compute.kill() - self.network.kill() + #self.compute.kill() + #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): -- cgit From e3a88390fd62308cde3d4c597d653c8dc245bed4 Mon Sep 17 00:00:00 
2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: don't need to use a separate connection --- nova/rpc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index e1f594a99..a212383fd 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -481,8 +481,7 @@ def multicall(context, topic, msg): wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - pub_conn = ConnectionPool.get() - publisher = TopicPublisher(connection=pub_conn, topic=topic) + publisher = TopicPublisher(connection=con_conn, topic=topic) publisher.send(msg) publisher.close() -- cgit From c9b21b0619891c069251c568e4d89be791af56c3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: lots of fixes for rpc and extra imports --- nova/fakerabbit.py | 12 +++-- nova/rpc.py | 68 +++++++++++------------------ nova/service.py | 9 ++-- nova/test.py | 8 ++-- nova/tests/integrated/integrated_helpers.py | 5 +-- 5 files changed, 45 insertions(+), 57 deletions(-) (limited to 'nova') diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 5f3e75c48..ff993e29a 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit") EXCHANGES = {} QUEUES = {} +CONSUMERS = {} class Message(base.BaseMessage): @@ -101,17 +102,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + global CONSUMERS LOG.debug("Adding consumer %s", consumer_tag) - self.consumers[consumer_tag] = (queue, callback) + CONSUMERS[consumer_tag] = (queue, callback) def cancel(self, consumer_tag): + global CONSUMERS LOG.debug("Removing consumer %s", consumer_tag) - del self.consumers[consumer_tag] + del CONSUMERS[consumer_tag] def consume(self, limit=None): + global CONSUMERS num = 0 while True: - for (queue, callback) in self.consumers.itervalues(): + for (queue, callback) in CONSUMERS.itervalues(): item = self.get(queue) if item: callback(item) @@ -147,5 +151,7 @@ class Backend(base.BaseBackend): def reset_all(): global EXCHANGES global QUEUES + global CONSUMERS EXCHANGES = {} QUEUES = {} + CONSUMERS = {} diff --git a/nova/rpc.py b/nova/rpc.py index a212383fd..7faed4d3a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -33,9 +33,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging -import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -142,30 +140,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - # """Wraps the parent fetch with some logic for failed connection.""" - # # TODO(vish): the logic for failed connections and logging should be - # # refactored into some sort of connection manager object - # try: - # if self.failed_connection: - # # NOTE(vish): connection is defined in the parent class, we can - # # recreate it as long as we create the backend too - # # pylint: disable=W0201 - # self.connection = Connection.recreate() - # self.backend = self.connection.create_backend() - # self.declare() - # return super(Consumer, self).fetch( - # no_ack, auto_ack, enable_callbacks) - # if self.failed_connection: - # LOG.error(_('Reconnected to queue')) - # self.failed_connection = False - # # NOTE(vish): This is catching all errors because we really don't - # # want 
exceptions to be logged 10 times a second if some - # # persistent failure occurs. - # except Exception, e: # pylint: disable=W0703 - # if not self.failed_connection: - # LOG.exception(_('Failed to fetch message from queue: %s' % e)) - # self.failed_connection = True + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch with some logic for failed connection.""" + # TODO(vish): the logic for failed connections and logging should be + # refactored into some sort of connection manager object + try: + if self.failed_connection: + # NOTE(vish): connection is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 + self.connection = Connection.recreate() + self.backend = self.connection.create_backend() + self.declare() + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) + if self.failed_connection: + LOG.error(_('Reconnected to queue')) + self.failed_connection = False + # NOTE(vish): This is catching all errors because we really don't + # want exceptions to be logged 10 times a second if some + # persistent failure occurs. + except Exception, e: # pylint: disable=W0703 + if not self.failed_connection: + LOG.exception(_('Failed to fetch message from queue: %s' % e)) + self.failed_connection = True def close(self, *args, **kwargs): LOG.debug('Closing consumer %s', self.consumer_tag) @@ -325,6 +323,7 @@ class ConsumerSet(object): def close(self): self.consumer_set.close() + class Publisher(messaging.Publisher): """Publisher base class.""" pass @@ -511,23 +510,6 @@ class MulticallWaiter(object): return self.wait() def wait(self): - # TODO(termie): This is probably really a much simpler issue but am - # trying to solve the problem quickly. This works but - # I'd prefer to dig in and do it the best way later on. 
- - #def _waiter(): - # i = 0 - # while not self._closed: - # LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag) - # i += 1 - # try: - # self._consumer.wait(limit=1) - # except StopIteration: - # pass - # self._consumer.close() - # ConnectionPool.put(self._consumer.connection) - #eventlet.spawn(_waiter) - while True: rv = None while rv is None and not self._closed: diff --git a/nova/service.py b/nova/service.py index 2da510140..2626c49ae 100644 --- a/nova/service.py +++ b/nova/service.py @@ -89,8 +89,6 @@ class Service(object): self.manager.update_available_resource(ctxt) conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - conn3 = rpc.Connection.instance(new=True) logging.debug("Creating Consumer connection for Service %s" % \ self.topic) @@ -111,10 +109,13 @@ class Service(object): cset = rpc.ConsumerSet(conn1, [consumer_all, consumer_node, fanout]) + # Wait forever, processing these consumers def _wait(): - cset.wait() - cset.close() + try: + cset.wait() + finally: + cset.close() self.csetthread = greenthread.spawn(_wait) diff --git a/nova/test.py b/nova/test.py index 7b2cf94b6..df48afbb1 100644 --- a/nova/test.py +++ b/nova/test.py @@ -31,17 +31,15 @@ import uuid import unittest import mox -import shutil import stubout from eventlet import greenthread -from nova import context -from nova import db from nova import fakerabbit from nova import flags from nova import rpc from nova import service from nova import wsgi +from nova.virt import fake FLAGS = flags.FLAGS @@ -100,6 +98,10 @@ class TestCase(unittest.TestCase): if FLAGS.fake_rabbit: fakerabbit.reset_all() + if FLAGS.connection_type == 'fake': + if hasattr(fake.FakeConnection, '_instance'): + del fake.FakeConnection._instance + # Reset any overriden flags self.reset_flags() diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index bc98921f0..7f590441e 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -154,10 +154,7 @@ class _IntegratedTestBase(test.TestCase): # set up services self.start_service('compute') self.start_service('volume') - # NOTE(justinsb): There's a bug here which is eluding me... - # If we start the network_service, all is good, but then subsequent - # tests fail: CloudTestCase.test_ajax_console in particular. - #self.start_service('network') + self.start_service('network') self.start_service('scheduler') self._start_api_service() -- cgit From 9334d41c6fe638a3119327702094695cfbd38271 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:25 -0700 Subject: make sure that using multicall on a call with a single result still functions --- nova/rpc.py | 4 ++-- nova/tests/test_rpc.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 7faed4d3a..84493271f 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -233,10 +233,10 @@ class AdapterConsumer(Consumer): logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) - msg_reply(msg_id, None, None) else: msg_reply(msg_id, rval, None) - #msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
+ msg_reply(msg_id, None, None) except Exception as e: logging.exception('Exception during message handling') if msg_id: diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index e5d99474d..c1ef60ff6 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,35 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_call_succeed_despite_multiple_returns(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_multicall_succeed_once(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + i = 0 + for x in result: + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + i += 1 + def test_multicall_succeed_three_times(self): """Get a value through rpc call""" value = 42 -- cgit From c7fe7e5e28b9f4bb999c8309f56953f6609cbc57 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:49 -0700 Subject: cleanup the code for merging --- nova/fakerabbit.py | 4 --- nova/rpc.py | 78 ++++++++++++++++++++++-------------------------- nova/service.py | 23 +++++++------- nova/test.py | 2 +- nova/tests/test_cloud.py | 3 -- nova/tests/test_rpc.py | 1 - 6 files changed, 46 insertions(+), 65 deletions(-) (limited to 'nova') diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index ff993e29a..e7e9dab77 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -78,10 +78,6 @@ class Queue(object): class Backend(base.BaseBackend): - def __init__(self, connection, **kwargs): - super(Backend, self).__init__(connection, **kwargs) - self.consumers = {} - def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: diff --git a/nova/rpc.py b/nova/rpc.py index 84493271f..8d14494f0 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,7 +24,6 @@ No fan-out support yet. 
""" -import greenlet import json import sys import time @@ -36,6 +35,7 @@ from carrot import messaging from eventlet import greenpool from eventlet import pools from eventlet import queue +import greenlet from nova import context from nova import exception @@ -50,9 +50,9 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') + 'Size of RPC thread pool') flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -96,7 +96,7 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): - """Class that implements a Pool of Connections""" + """Class that implements a Pool of Connections.""" # TODO(comstud): Timeout connections not used in a while def create(self): @@ -152,8 +152,9 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch(no_ack, + auto_ack, + enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -165,10 +166,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def close(self, *args, **kwargs): - LOG.debug('Closing consumer %s', self.consumer_tag) - return super(Consumer, self).close(*args, **kwargs) - def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -188,8 +185,10 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): - """Consumer callback that parses the message for validity and - fires off a thread to call the proxy object method. + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. Message data should be a dictionary with two keys: method: string representing the method to call @@ -199,8 +198,8 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) + # This will be popped off in _unpack_context msg_id = message_data.get('_msg_id', None) - ctxt = _unpack_context(message_data) method = message_data.get('method') @@ -228,13 +227,13 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: - # TODO(termie): re-enable when fix the yielding issue + # Check if the result was a generator if hasattr(rval, 'send'): - logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) else: msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
msg_reply(msg_id, None, None) except Exception as e: @@ -277,7 +276,7 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): - """Groups consumers to listen on together on a single connection""" + """Groups consumers to listen on together on a single connection.""" def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) @@ -365,7 +364,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = False + self.exclusive = True super(DirectConsumer, self).__init__(connection=connection) @@ -393,20 +392,18 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = ConnectionPool.get() - publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply, 'failure': failure}) - LOG.error('MSG REPLY SUCCESS') - except TypeError: - LOG.error('MSG REPLY FAILURE') - publisher.send( - {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure}) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + try: + publisher.send({'result': reply, 'failure': failure}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) + + publisher.close() class RemoteError(exception.Error): @@ -518,12 +515,9 @@ class MulticallWaiter(object): except Exception: self.close() raise - #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) - LOG.error('RV %s', rv) result = self._results.get() - LOG.error('RESULT %s', result) if isinstance(result, Exception): self.close() raise result @@ -545,22 +539,20 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = FanoutPublisher(topic, connection=conn) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() def generic_response(message_data, message): diff --git a/nova/service.py b/nova/service.py index 2626c49ae..94afd5f78 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,27 +88,27 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - logging.debug("Creating Consumer connection for Service %s" % \ - self.topic) + self.conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % + self.topic) # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, + connection=self.conn, topic=self.topic, proxy=self) 
consumer_node = rpc.TopicAdapterConsumer( - connection=conn1, + connection=self.conn, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn1, + connection=self.conn, topic=self.topic, proxy=self) - cset = rpc.ConsumerSet(conn1, [consumer_all, - consumer_node, - fanout]) + cset = rpc.ConsumerSet(self.conn, [consumer_all, + consumer_node, + fanout]) # Wait forever, processing these consumers def _wait(): @@ -119,10 +119,6 @@ class Service(object): self.csetthread = greenthread.spawn(_wait) - #self.timers.append(consumer_all.attach_to_eventlet()) - #self.timers.append(consumer_node.attach_to_eventlet()) - #self.timers.append(fanout.attach_to_eventlet()) - if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) @@ -185,6 +181,7 @@ class Service(object): except greenlet.GreenletExit: pass self.stop() + rpc.ConnectionPool.put(self.conn) try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: diff --git a/nova/test.py b/nova/test.py index df48afbb1..80b2d0a74 100644 --- a/nova/test.py +++ b/nova/test.py @@ -83,7 +83,7 @@ class TestCase(unittest.TestCase): self._monkey_patch_attach() self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() - rpc.ConnectionPool = rpc.Pool(max_size=30) + rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size) def tearDown(self): """Runs after each test method to tear down test environment.""" diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a838dd530..ca3ef7ffe 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,6 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - #self.compute.kill() - #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): @@ -314,7 +312,6 @@ class CloudTestCase(test.TestCase): rv = self.cloud.terminate_instances(self.context, [instance_id]) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index c1ef60ff6..fcecfb352 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -124,7 +124,6 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) - LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', -- cgit From 7755bbfc7b16248dab23bfab479d09501519290f Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: cleanups --- nova/tests/test_rpc.py | 47 +++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index fcecfb352..8523b409c 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -31,7 +31,6 @@ LOG = logging.getLogger('nova.tests.rpc') class RpcTestCase(test.TestCase): - """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance(True) @@ -43,21 +42,18 @@ class RpcTestCase(test.TestCase): self.context = context.get_admin_context() def test_call_succeed(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo", "args": {"value": value}}) 
self.assertEqual(value, result) def test_call_succeed_despite_multiple_returns(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times", "args": {"value": value}}) self.assertEqual(value, result) def test_call_succeed_despite_multiple_returns_yield(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times_yield", @@ -65,7 +61,6 @@ class RpcTestCase(test.TestCase): self.assertEqual(value, result) def test_multicall_succeed_once(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -79,7 +74,6 @@ class RpcTestCase(test.TestCase): i += 1 def test_multicall_succeed_three_times(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -91,7 +85,6 @@ class RpcTestCase(test.TestCase): i += 1 def test_multicall_succeed_three_times_yield(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -103,7 +96,7 @@ class RpcTestCase(test.TestCase): i += 1 def test_context_passed(self): - """Makes sure a context is passed through rpc call""" + """Makes sure a context is passed through rpc call.""" value = 42 result = rpc.call(self.context, 'test', {"method": "context", @@ -111,11 +104,12 @@ class RpcTestCase(test.TestCase): self.assertEqual(self.context.to_dict(), result) def test_call_exception(self): - """Test that exception gets passed back properly + """Test that exception gets passed back properly. rpc.call returns a RemoteError object. The value of the exception is converted to a string, so we convert it back to an int in the test. + """ value = 42 self.assertRaises(rpc.RemoteError, @@ -134,7 +128,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(int(exc.value), value) def test_nested_calls(self): - """Test that we can do an rpc.call inside another call""" + """Test that we can do an rpc.call inside another call.""" class Nested(object): @staticmethod def echo(context, queue, value): @@ -162,8 +156,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(value, result) def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection""" - + """Test that ConnectionPool recycles a single connection.""" conn1 = rpc.ConnectionPool.get() rpc.ConnectionPool.put(conn1) conn2 = rpc.ConnectionPool.get() @@ -171,10 +164,13 @@ class RpcTestCase(test.TestCase): self.assertEqual(conn1, conn2) def test_connectionpool_double(self): - """Test that ConnectionPool returns 2 separate connections - when called consecutively and the pool returns connections LIFO - """ + """Test that ConnectionPool returns and reuses separate connections. + + When called consecutively we should get separate connections and upon + returning them those connections should be reused for future calls + before generating a new connection. 
+ """ conn1 = rpc.ConnectionPool.get() conn2 = rpc.ConnectionPool.get() @@ -184,14 +180,11 @@ class RpcTestCase(test.TestCase): conn3 = rpc.ConnectionPool.get() conn4 = rpc.ConnectionPool.get() - self.assertEqual(conn2, conn3) - self.assertEqual(conn1, conn4) + self.assertEqual(conn1, conn3) + self.assertEqual(conn2, conn4) def test_connectionpool_limit(self): - """Test connection pool limit and verify all connections - are unique - """ - + """Test connection pool limit and connection uniqueness.""" max_size = FLAGS.rpc_conn_pool_size conns = [] @@ -205,19 +198,21 @@ class RpcTestCase(test.TestCase): class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. - Uses static methods because we aren't actually storing any state""" + """ @staticmethod def echo(context, value): - """Simply returns whatever value is sent in""" + """Simply returns whatever value is sent in.""" LOG.debug(_("Received %s"), value) return value @staticmethod def context(context, value): - """Returns dictionary version of context""" + """Returns dictionary version of context.""" LOG.debug(_("Received %s"), context) return context.to_dict() @@ -235,5 +230,5 @@ class TestReceiver(object): @staticmethod def fail(context, value): - """Raises an exception with the value sent in""" + """Raises an exception with the value sent in.""" raise Exception(value) -- cgit From f56df190ee888ae731740e7e949fb6f0c012d687 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: replace removed import --- nova/tests/test_cloud.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index ca3ef7ffe..b64be662e 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -19,6 +19,7 @@ from base64 import b64decode from M2Crypto import BIO from M2Crypto import RSA +import os from eventlet import greenthread -- cgit From b3506a471bbce063d72aead211f45d693bda7853 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: don't put connection back in pool --- nova/service.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/service.py b/nova/service.py index 94afd5f78..141fd4253 100644 --- a/nova/service.py +++ b/nova/service.py @@ -181,7 +181,6 @@ class Service(object): except greenlet.GreenletExit: pass self.stop() - rpc.ConnectionPool.put(self.conn) try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: -- cgit From a05e8e7587e42633e8459fd050eee3a4da247330 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: move consumerset killing into stop --- nova/service.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/service.py b/nova/service.py index 141fd4253..782183322 100644 --- a/nova/service.py +++ b/nova/service.py @@ -175,11 +175,6 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" - self.csetthread.kill() - try: - self.csetthread.wait() - except greenlet.GreenletExit: - pass self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) @@ -187,6 +182,11 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): + self.csetthread.kill() + try: + self.csetthread.wait() + except greenlet.GreenletExit: + 
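
The connection pool tests above pin down the behaviour the RPC layer relies on: a connection handed back with put() is reused by a later get(), two outstanding get() calls return distinct connections, and the pool never builds more than rpc_conn_pool_size connections. A minimal stand-in with the same get/put surface (standard library only, not eventlet's pool; the class name is illustrative) looks like this:

    import collections

    class MiniConnectionPool(object):
        """Hand out recycled connections before ever creating new ones."""

        def __init__(self, max_size, create):
            self.max_size = max_size          # like FLAGS.rpc_conn_pool_size
            self.create = create              # factory for brand-new connections
            self.free = collections.deque()   # connections waiting to be reused
            self.created = 0                  # how many we have built so far

        def get(self):
            if self.free:
                return self.free.popleft()    # reuse in the order they came back
            if self.created >= self.max_size:
                # eventlet's pool would block here until a put() frees one
                raise RuntimeError('connection pool exhausted')
            self.created += 1
            return self.create()

        def put(self, conn):
            self.free.append(conn)

    pool = MiniConnectionPool(max_size=2, create=object)
    conn1 = pool.get()
    pool.put(conn1)
    assert pool.get() is conn1   # recycled, as test_connectionpool_single checks
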
pass for x in self.timers: try: x.stop() -- cgit From feb04f0117450bcd6e8f4966f4487575073be41c Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: change the behavior of calling a multicall --- nova/rpc.py | 8 +++++--- nova/tests/test_rpc.py | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 8d14494f0..493978e57 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -236,6 +236,9 @@ class AdapterConsumer(Consumer): # This final None tells multicall that it is done. msg_reply(msg_id, None, None) + elif hasattr(rval, 'send'): + # NOTE(vish): this iterates through the generator + list(rval) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -530,9 +533,8 @@ class MulticallWaiter(object): def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) - for x in rv: - rv.close() - return x + # NOTE(vish): return the last result from the multicall + return list(rv)[-1] def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 8523b409c..35f4a64d9 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -51,14 +51,14 @@ class RpcTestCase(test.TestCase): value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times", "args": {"value": value}}) - self.assertEqual(value, result) + self.assertEqual(value + 2, result) def test_call_succeed_despite_multiple_returns_yield(self): value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times_yield", "args": {"value": value}}) - self.assertEqual(value, result) + self.assertEqual(value + 2, result) def test_multicall_succeed_once(self): value = 42 -- cgit From 61a4dd17be5d89e8aac62d6783310cb5ddb6ee60 Mon Sep 17 00:00:00 2001 From: sateesh Date: Thu, 26 May 2011 10:36:52 +0530 Subject: Instead of redefining the flag 'vlan_interface', just setting a default value (vmnic0) in vmwareapi_net.py --- nova/network/vmwareapi_net.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py index 373060add..04210c011 100644 --- a/nova/network/vmwareapi_net.py +++ b/nova/network/vmwareapi_net.py @@ -30,9 +30,7 @@ LOG = logging.getLogger("nova.network.vmwareapi_net") FLAGS = flags.FLAGS -flags.DEFINE_string('vlan_interface', 'vmnic0', - 'Physical network adapter name in VMware ESX host for ' - 'vlan networking') +FLAGS['vlan_interface'].SetDefault('vmnic0') def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): -- cgit From 34d660128b8fa935dc1a1d5b0b22e8177135d3a8 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 26 May 2011 19:46:11 +0900 Subject: creating _take_action_to_instance to nova.virt.libvirt_conn.py --- nova/virt/libvirt_conn.py | 43 +++++++++++++++---------------------------- 1 file changed, 15 insertions(+), 28 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index aa5e42fc8..4e2e2292e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -45,6 +45,7 @@ import sys import tempfile import time import uuid +import inspect from xml.dom import minidom from xml.etree import ElementTree @@ -548,53 +549,39 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) - @exception.wrap_exception - def pause(self, instance, callback): - 
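
The rpc.py hunk above redefines call() as "run a multicall and keep the last value", and drains generator results with list(rval) even when no reply is expected, which is why the echo_three_times tests now expect value + 2. A small, self-contained model of that relationship (plain Python, no AMQP involved):

    def multicall(method, *args):
        """Yield every value the remote side produces, in order."""
        for value in method(*args):
            yield value

    def call(method, *args):
        """A call is just a multicall whose last result is kept."""
        results = list(multicall(method, *args))
        return results[-1] if results else None

    def echo_three_times_yield(value):
        for i in range(3):
            yield value + i

    assert list(multicall(echo_three_times_yield, 42)) == [42, 43, 44]
    assert call(echo_three_times_yield, 42) == 44   # hence value + 2 in the tests
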
"""Pause VM instance""" + def _take_action_to_instance(self, action, instance, *arg): + """action VM instance""" if self.read_only: tmpconn = self._connect(self.libvirt_uri, False) dom = tmpconn.lookupByName(instance.name) - dom.suspend() + method = getattr(dom, action) + method(*arg) tmpconn.close() else: dom = self._conn.lookupByName(instance.name) - dom.suspend() + method = getattr(dom, action) + method(*arg) + + @exception.wrap_exception + def pause(self, instance, callback): + """Pause VM instance""" + self._take_action_to_instance("suspend", instance) @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - dom.resume() - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.resume() + self._take_action_to_instance("resume", instance) @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - dom.managedSave(0) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.managedSave(0) + self._take_action_to_instance("managedSave", instance, 0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - dom.create() + self._take_action_to_instance("create", instance) except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From ce5c7287e06cb7ce1d1a41354a5d6ea073d308d0 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 26 May 2011 20:31:50 +0900 Subject: remove unnecessary import inspect at nova.virt.libvirt_conn --- nova/virt/libvirt_conn.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 4e2e2292e..f9c441505 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -45,7 +45,6 @@ import sys import tempfile import time import uuid -import inspect from xml.dom import minidom from xml.etree import ElementTree -- cgit From 87717c33ae78201a24c0f5a3416ae4b0080e4668 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 26 May 2011 20:49:14 +0900 Subject: replace double quatation to single quatation at nova.virt.libvirt_conn --- nova/virt/libvirt_conn.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index f9c441505..8c9a3550a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -564,23 +564,23 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" - self._take_action_to_instance("suspend", instance) + self._take_action_to_instance('suspend', instance) @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - self._take_action_to_instance("resume", instance) + self._take_action_to_instance('resume', instance) @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - self._take_action_to_instance("managedSave", instance, 0) + self._take_action_to_instance('managedSave', 
instance, 0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - self._take_action_to_instance("create", instance) + self._take_action_to_instance('create', instance) except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From f37d94428dd0b56632958d5d3a6930531a51cd44 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 26 May 2011 10:54:46 -0400 Subject: Restricted image filtering by name and status only --- nova/api/openstack/images.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 553566d58..2e779da79 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -28,8 +28,7 @@ from nova.api.openstack.views import images as images_view LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS -SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', - 'size_min', 'size_max'] +SUPPORTED_FILTERS = ['name', 'status'] class Controller(common.OpenstackController): -- cgit From b9b16ca71d4bbb9782482bdf5d848bb5b787732f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 26 May 2011 13:59:25 -0400 Subject: Expanded tests --- nova/tests/api/openstack/test_images.py | 122 ++++++++++++++++++++++++++++++-- 1 file changed, 116 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index f3f0217d6..9f1f28611 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -709,23 +709,119 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) - def test_get_image_request_filters(self): + def test_image_filter_with_name(self): mocker = mox.Mox() image_service = mocker.CreateMockAnything() context = object() - filters = {'status': 'ACTIVE', - 'name': 'testname', - 'property-test': '3'} + filters = {'name': 'testname'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?name=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_with_status(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?status=ACTIVE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_with_property(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'property-test': '3'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?property-test=3') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_not_supported(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request 
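
The images controller change above narrows SUPPORTED_FILTERS to name and status, and the mox-based tests that follow fix the contract: whatever arrives in the query string, only recognised filters are forwarded to the image service. A hedged sketch of that kind of whitelist extraction is below; it is simplified, and the property-* pass-through is an inference from the property filter test rather than something stated in the controller hunk.

    SUPPORTED_FILTERS = ['name', 'status']

    def get_filters(query_params):
        """Forward only the query parameters the image service understands."""
        filters = {}
        for param, value in query_params.items():
            if param in SUPPORTED_FILTERS or param.startswith('property-'):
                filters[param] = value
        return filters

    assert get_filters({'name': 'testname', 'UNSUPPORTEDFILTER': 'x'}) == \
        {'name': 'testname'}
    assert get_filters({'property-test': '3'}) == {'property-test': '3'}
    assert get_filters({}) == {}
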
= webob.Request.blank( + '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_no_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_name(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'name': 'testname'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?name=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_status(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_property(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'property-test': '3'} image_service.detail(context, filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( - '/v1.1/images/detail?status=ACTIVE&name=testname&property-test=3') + '/v1.1/images/detail?property-test=3') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) mocker.VerifyAll() - def test_get_image_request_filters_not_supported(self): + def test_image_detail_filter_not_supported(self): mocker = mox.Mox() image_service = mocker.CreateMockAnything() context = object() @@ -739,6 +835,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): controller.detail(request) mocker.VerifyAll() + def test_image_detail_no_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') res = req.get_response(fakes.wsgi_app()) -- cgit From 899642030dd60541153ccee810d082816f92dd49 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 26 May 2011 19:27:27 +0000 Subject: Change the return from glance to be a list of dictionaries describing VDIs Fix the rest of the code to account for this Add a test for swap --- nova/tests/test_xenapi.py | 23 ++++++++++++++++++++ nova/tests/xenapi/stubs.py | 23 +++++++++++++++----- nova/virt/xenapi/fake.py | 5 ++++- nova/virt/xenapi/vm_utils.py | 49 ++++++++++++++++++++++++++--------------- 
nova/virt/xenapi/vmops.py | 52 ++++++++++++++++++++++++-------------------- 5 files changed, 105 insertions(+), 47 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index be1e35697..18a267896 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -395,6 +395,29 @@ class XenAPIVMTestCase(test.TestCase): os_type="linux") self.check_vm_params_for_linux() + def test_spawn_vhd_glance_swapdisk(self): + # Change the default host_call_plugin to one that'll return + # a swap disk + orig_func = stubs.FakeSessionForVMTests.host_call_plugin + + stubs.FakeSessionForVMTests.host_call_plugin = \ + stubs.FakeSessionForVMTests.host_call_plugin_swap + + try: + # We'll steal the above glance linux test + self.test_spawn_vhd_glance_linux() + finally: + # Make sure to put this back + stubs.FakeSessionForVMTests.host_call_plugin = orig_func + + # We should have 2 VBDs. + self.assertEqual(len(self.vm['VBDs']), 2) + # Now test that we have 1. + self.tearDown() + self.setUp() + self.test_spawn_vhd_glance_linux() + self.assertEqual(len(self.vm['VBDs']), 1) + def test_spawn_vhd_glance_windows(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 9f6f64318..35308d95f 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -38,7 +38,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return {'primary_vdi_uuid': vdi_uuid} + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) @@ -134,16 +134,29 @@ class FakeSessionForVMTests(fake.SessionBase): super(FakeSessionForVMTests, self).__init__(uri) def host_call_plugin(self, _1, _2, plugin, method, _5): + sr_ref = fake.get_all('SR')[0] + vdi_ref = fake.create_vdi('', False, sr_ref, False) + vdi_rec = fake.get_record('VDI', vdi_ref) + if plugin == "glance" and method == "download_vhd": + ret_str = json.dumps([dict(vdi_type='os', + vdi_uuid=vdi_rec['uuid'])]) + else: + ret_str = vdi_rec['uuid'] + return '%s' % ret_str + + def host_call_plugin_swap(self, _1, _2, plugin, method, _5): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) if plugin == "glance" and method == "download_vhd": swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) - return '%s' % json.dumps( - {'primary_vdi_uuid': vdi_rec['uuid'], - 'swap_vdi_uuid': swap_vdi_rec['uuid']}) - return '%s' % vdi_rec['uuid'] + ret_str = json.dumps( + [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']), + dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])]) + else: + ret_str = vdi_rec['uuid'] + return '%s' % ret_str def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index e36ef3288..76988b172 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -159,7 +159,10 @@ def after_VBD_create(vbd_ref, vbd_rec): vbd_rec['device'] = '' vm_ref = vbd_rec['VM'] vm_rec = _db_content['VM'][vm_ref] - vm_rec['VBDs'] = [vbd_ref] + if vm_rec.get('VBDs', None): + vm_rec['VBDs'].append(vbd_ref) + else: + vm_rec['VBDs'] = [vbd_ref] vm_name_label = _db_content['VM'][vm_ref]['name_label'] vbd_rec['vm_name_label'] = vm_name_label diff --git 
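
The swap-disk test above temporarily replaces FakeSessionForVMTests.host_call_plugin with the variant that also reports a swap VDI, and restores the original in a finally block so later tests are unaffected. That save, replace, restore idiom, reduced to a self-contained example (the Session class and method names here are illustrative only):

    class Session(object):
        def call_plugin(self):
            return ['os-disk']

    def call_plugin_with_swap(self):
        return ['os-disk', 'swap-disk']

    orig_func = Session.call_plugin              # remember the original
    Session.call_plugin = call_plugin_with_swap  # swap in the test double
    try:
        assert Session().call_plugin() == ['os-disk', 'swap-disk']
    finally:
        Session.call_plugin = orig_func          # always put it back
    assert Session().call_plugin() == ['os-disk']
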
a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 3d980013a..bee9742a4 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -377,6 +377,9 @@ class VMHelper(HelperBase): xenapi_image_service = ['glance', 'objectstore'] glance_address = 'address for glance services' glance_port = 'port for glance services' + + Returns: A single filename if image_type is KERNEL_RAMDISK + A list of dictionaries that describe VDIs, otherwise """ access = AuthManager().get_access_key(user, project) @@ -391,6 +394,10 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_glance_vhd(cls, session, instance_id, image, access, image_type): + """Tell glance to download an image and put the VHDs into the SR + + Returns: A list of dictionaries that describe VDIs + """ LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) @@ -410,25 +417,21 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) result = session.wait_for_task(task, instance_id) - vdi_uuids = json.loads(result) - primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') - swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) + vdis = json.loads(result) + for vdi in vdis: + LOG.debug(_("xapi 'download_vhd' returned VDI of " + "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'" % vdi)) cls.scan_sr(session, instance_id, sr_ref) + # Pull out the UUID of the first VDI + vdi_uuid = vdis[0]['vdi_uuid'] # Set the name-label to ease debugging - primary_vdi_ref = session.get_xenapi().VDI.get_by_uuid(primary_vdi_uuid) + vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) primary_name_label = get_name_label_for_image(image) - session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label) - - LOG.debug(_("xapi 'download_vhd' returned VDI UUID " - "%(primary_vdi_uuid)s") % locals()) - if swap_vdi_uuid: - LOG.debug(_("xapi 'download_vhd' returned SWAP VDI UUID " - "%(swap_vdi_uuid)s") % locals()) + session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label) - LOG.debug("=" * 100) - return vdi_uuids + return vdis @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, @@ -440,6 +443,8 @@ class VMHelper(HelperBase): plugin; instead, it streams the disks through domU to the VDI directly. + Returns: A single filename if image_type is KERNEL_RAMDISK + A list of dictionaries that describe VDIs, otherwise """ # FIXME(sirp): Since the Glance plugin seems to be required for the # VHD disk, it may be worth using the plugin for both VHD and RAW and @@ -486,7 +491,7 @@ class VMHelper(HelperBase): return filename else: vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) - return {'primary_vdi_uuid': vdi_uuid} + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] @classmethod def determine_disk_image_type(cls, instance): @@ -545,6 +550,11 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, image_type): + """Fetch image from glance based on image type. + + Returns: A single filename if image_type is KERNEL_RAMDISK + A list of dictionaries that describe VDIs, otherwise + """ if image_type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd( session, instance_id, image, access, image_type) @@ -555,6 +565,11 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, secret, image_type): + """Fetch an image from objectstore. 
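
The vm_utils hunk above changes the glance 'download_vhd' contract from a dict keyed by role to a JSON-encoded list of dictionaries, each carrying vdi_type ('os' or 'swap') and vdi_uuid, with the first entry treated as the boot disk. A short, self-contained sketch of consuming that structure; the field names come from the diff itself, the UUID values are dummies:

    import json

    # What the plugin call returns, as described in the hunk above.
    result = json.dumps([{'vdi_type': 'os', 'vdi_uuid': 'aaaa-1111'},
                         {'vdi_type': 'swap', 'vdi_uuid': 'bbbb-2222'}])

    vdis = json.loads(result)               # what wait_for_task hands back
    boot_vdi_uuid = vdis[0]['vdi_uuid']     # first entry is the primary/OS disk
    swap_uuids = [v['vdi_uuid'] for v in vdis if v['vdi_type'] == 'swap']

    assert boot_vdi_uuid == 'aaaa-1111'
    assert swap_uuids == ['bbbb-2222']
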
+ + Returns: A single filename if image_type is KERNEL_RAMDISK + A list of dictionaries that describe VDIs, otherwise + """ url = images.image_url(image) LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) if image_type == ImageType.KERNEL_RAMDISK: @@ -572,10 +587,10 @@ class VMHelper(HelperBase): if image_type == ImageType.DISK_RAW: args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) - uuid = session.wait_for_task(task, instance_id) + uuid_or_fn = session.wait_for_task(task, instance_id) if image_type != ImageType.KERNEL_RAMDISK: - return {'primary_vdi_uuid': uuid} - return uuid + return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)] + return uuid_or_fn @classmethod def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 2a8d97a9d..02e140dcc 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -91,7 +91,8 @@ class VMOps(object): def finish_resize(self, instance, disk_info): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) - vm_ref = self._create_vm(instance, {'primary_vdi_uuid': vdi_uuid}) + vm_ref = self._create_vm(instance, + [dict(vdi_type='os', vdi_uuid=vdi_uuid)]) self.resize_instance(instance, vdi_uuid) self._spawn(instance, vm_ref) @@ -105,25 +106,25 @@ class VMOps(object): LOG.debug(_("Starting instance %s"), instance.name) self._session.call_xenapi('VM.start', vm_ref, False, False) - def _create_disk(self, instance): + def _create_disks(self, instance): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - vdi_uuids = VMHelper.fetch_image(self._session, + vdis = VMHelper.fetch_image(self._session, instance.id, instance.image_id, user, project, disk_image_type) - return vdi_uuids + return vdis def spawn(self, instance, network_info=None): - vdi_uuids = self._create_disk(instance) - vm_ref = self._create_vm(instance, vdi_uuids, network_info) + vdis = self._create_disks(instance) + vm_ref = self._create_vm(instance, vdis, network_info) self._spawn(instance, vm_ref) def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdi_uuids, network_info=None): + def _create_vm(self, instance, vdis, network_info=None): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -142,15 +143,6 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - # Are we building from a pre-existing disk? 
- primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', - vdi_uuids.get('primary_vdi_uuid')) - swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) - if swap_vdi_uuid: - swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) - else: - swap_vdi_ref = None - disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None @@ -165,17 +157,29 @@ class VMOps(object): instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) + # Create the VM ref and attach the first disk + first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdis[0]['vdi_uuid']) use_pv_kernel = VMHelper.determine_is_pv(self._session, - instance.id, primary_vdi_ref, disk_image_type, + instance.id, first_vdi_ref, disk_image_type, instance.os_type) - vm_ref = VMHelper.create_vm(self._session, instance, kernel, - ramdisk, use_pv_kernel) - + vm_ref = VMHelper.create_vm(self._session, instance, + kernel, ramdisk, use_pv_kernel) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) - if swap_vdi_ref: + vdi_ref=first_vdi_ref, userdevice=0, bootable=True) + + # Attach any other disks + # userdevice 1 is reserved for rescue + userdevice = 2 + for vdi in vdis[1:]: + # vdi['vdi_type'] is either 'os' or 'swap', but we don't + # really care what it is right here. + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdi['vdi_uuid']) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=swap_vdi_ref, userdevice=2, bootable=False) + vdi_ref=vdi_ref, userdevice=userdevice, + bootable=False) + userdevice += 1 # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. @@ -185,7 +189,7 @@ class VMOps(object): # Alter the image before VM start for, e.g. network injection if FLAGS.xenapi_inject_image: VMHelper.preconfigure_instance(self._session, instance, - primary_vdi_ref, network_info) + first_vdi_ref, network_info) self.create_vifs(vm_ref, network_info) self.inject_network_info(instance, network_info, vm_ref) -- cgit From fc27a0ac4f907282a669e2c9f3e128890907f236 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 26 May 2011 20:21:40 +0000 Subject: add a comment when calling glance:download_vhd so it's clear what is returned --- nova/virt/xenapi/vm_utils.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index bee9742a4..06ee8ee9b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -417,6 +417,10 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) result = session.wait_for_task(task, instance_id) + # 'download_vhd' will return a json encoded string containing + # a list of dictionaries describing VDIs. The dictionary will + # contain 'vdi_type' and 'vdi_uuid' keys. 'vdi_type' can be + # 'os' or 'swap' right now. 
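
In the vmops changes above, the first VDI is attached at userdevice 0 as the bootable disk, and any remaining VDIs (swap and so on) start at userdevice 2 because userdevice 1 is reserved for the rescue disk. The device-numbering logic on its own, as a minimal sketch with an illustrative helper name:

    def plan_vbds(vdis):
        """Return (vdi_uuid, userdevice, bootable) for each VDI to attach."""
        plan = []
        if not vdis:
            return plan
        plan.append((vdis[0]['vdi_uuid'], 0, True))   # boot disk at userdevice 0
        userdevice = 2                                # userdevice 1 is for rescue
        for vdi in vdis[1:]:
            plan.append((vdi['vdi_uuid'], userdevice, False))
            userdevice += 1
        return plan

    vdis = [{'vdi_type': 'os', 'vdi_uuid': 'os-uuid'},
            {'vdi_type': 'swap', 'vdi_uuid': 'swap-uuid'}]
    assert plan_vbds(vdis) == [('os-uuid', 0, True), ('swap-uuid', 2, False)]
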
vdis = json.loads(result) for vdi in vdis: LOG.debug(_("xapi 'download_vhd' returned VDI of " -- cgit From d7e0b45a9bc415e87beee32f10c8d6bdff9819ed Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 26 May 2011 15:08:53 -0700 Subject: changes per review --- nova/rpc.py | 17 ++++++++++------- nova/service.py | 17 ++++++++--------- nova/tests/test_rpc.py | 12 +++--------- nova/tests/test_service.py | 6 ++++-- 4 files changed, 25 insertions(+), 27 deletions(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 493978e57..1ec495bc8 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -28,6 +28,7 @@ import json import sys import time import traceback +import types import uuid from carrot import connection as carrot_connection @@ -228,7 +229,7 @@ class AdapterConsumer(Consumer): rval = node_func(context=ctxt, **node_args) if msg_id: # Check if the result was a generator - if hasattr(rval, 'send'): + if isinstance(rval, types.GeneratorType): for x in rval: msg_reply(msg_id, x, None) else: @@ -236,7 +237,7 @@ class AdapterConsumer(Consumer): # This final None tells multicall that it is done. msg_reply(msg_id, None, None) - elif hasattr(rval, 'send'): + elif isinstance(rval, types.GeneratorType): # NOTE(vish): this iterates through the generator list(rval) except Exception as e: @@ -281,11 +282,11 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): """Groups consumers to listen on together on a single connection.""" - def __init__(self, conn, consumer_list): + def __init__(self, connection, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None self.enabled = True - self.init(conn) + self.init(connection) def init(self, conn): if not conn: @@ -316,8 +317,7 @@ class ConsumerSet(object): running = False break except Exception as e: - LOG.error(_("Received exception %s " % type(e) + \ - "while processing consumer")) + LOG.exception(_("Exception while processing consumer")) self.reconnect() # Break to outer loop break @@ -534,7 +534,10 @@ def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) # NOTE(vish): return the last result from the multicall - return list(rv)[-1] + rv = list(rv) + if not rv: + return + return rv[-1] def cast(context, topic, msg): diff --git a/nova/service.py b/nova/service.py index 782183322..74f9f04d8 100644 --- a/nova/service.py +++ b/nova/service.py @@ -105,19 +105,18 @@ class Service(object): connection=self.conn, topic=self.topic, proxy=self) - - cset = rpc.ConsumerSet(self.conn, [consumer_all, - consumer_node, - fanout]) + consumer_set = rpc.ConsumerSet( + connection=self.conn, + consumer_list=[consumer_all, consumer_node, fanout]) # Wait forever, processing these consumers def _wait(): try: - cset.wait() + consumer_set.wait() finally: - cset.close() + consumer_set.close() - self.csetthread = greenthread.spawn(_wait) + self.consumer_set_thread = greenthread.spawn(_wait) if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -182,9 +181,9 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): - self.csetthread.kill() + self.consumer_set_thread.kill() try: - self.csetthread.wait() + self.consumer_set_thread.wait() except greenlet.GreenletExit: pass for x in self.timers: diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 35f4a64d9..ffd748efe 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -66,12 +66,10 @@ class RpcTestCase(test.TestCase): 'test', 
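
On the consumer side, the review changes above settle on isinstance(rval, types.GeneratorType) to detect streaming results: each yielded value is sent back as a reply, a trailing None tells the waiting multicall it is done, and generators are still drained even when there is no msg_id to reply to. A compact model of that dispatch, with an in-memory reply list standing in for msg_reply:

    import types

    def process_result(rval, replies):
        """Stream generator results back one by one, then signal completion."""
        if isinstance(rval, types.GeneratorType):
            for item in rval:
                replies.append(item)   # one msg_reply(msg_id, item, None) per value
        else:
            replies.append(rval)
        replies.append(None)           # the sentinel the waiting multicall stops on

    def three():
        yield 1
        yield 2
        yield 3

    replies = []
    process_result(three(), replies)
    assert replies == [1, 2, 3, None]

    replies = []
    process_result('single value', replies)
    assert replies == ['single value', None]
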
{"method": "echo", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): if i > 0: self.fail('should only receive one response') self.assertEqual(value + i, x) - i += 1 def test_multicall_succeed_three_times(self): value = 42 @@ -79,10 +77,8 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): self.assertEqual(value + i, x) - i += 1 def test_multicall_succeed_three_times_yield(self): value = 42 @@ -90,10 +86,8 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times_yield", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): self.assertEqual(value + i, x) - i += 1 def test_context_passed(self): """Makes sure a context is passed through rpc call.""" diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 0bba01d92..d1cc8bd61 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -142,7 +142,8 @@ class ServiceTestCase(test.TestCase): mock_cset = self.mox.CreateMock(rpc.ConsumerSet, {'wait': wait_func}) - rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + rpc.ConsumerSet(connection=mox.IgnoreArg(), + consumer_list=mox.IsA(list)).AndReturn(mock_cset) wait_func(mox.IgnoreArg()) service_create = {'host': host, @@ -331,7 +332,8 @@ class ServiceTestCase(test.TestCase): mock_cset = self.mox.CreateMock(rpc.ConsumerSet, {'wait': wait_func}) - rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + rpc.ConsumerSet(connection=mox.IgnoreArg(), + consumer_list=mox.IsA(list)).AndReturn(mock_cset) wait_func(mox.IgnoreArg()) self.mox.StubOutWithMock(serv.manager.driver, -- cgit From 103bcae9f172dfee64e7b9235807bcfe1a8aefb3 Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 26 May 2011 17:06:52 -0700 Subject: fix a minor bug unrelated to this change --- nova/rpc.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/rpc.py b/nova/rpc.py index 1ec495bc8..c5277c6a9 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -212,7 +212,9 @@ class AdapterConsumer(Consumer): # we just log the message and send an error string # back to the caller LOG.warn(_('no method for message: %s') % message_data) - msg_reply(msg_id, _('No method for message: %s') % message_data) + if msg_id: + msg_reply(msg_id, + _('No method for message: %s') % message_data) return self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) -- cgit From 6b0ed0cb61838d01b15df26fc32df0de90f1cfbe Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 13:20:45 +0900 Subject: Fix a description of 'snapshot_name_template'. --- nova/db/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index 3597732b9..e85ce9f16 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -48,7 +48,7 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', - 'Template string to be used to generate instance names') + 'Template string to be used to generate snapshot names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], -- cgit From 8b4c91b9f2c28e4809659f199affddbd66482dbb Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 13:36:59 +0900 Subject: Fix pep8 violations. 
--- nova/api/ec2/cloud.py | 13 +++++++++---- .../versions/019_add_volume_snapshot_support.py | 3 +-- nova/db/sqlalchemy/models.py | 1 + nova/tests/test_volume.py | 5 +++-- nova/volume/driver.py | 6 +++--- nova/volume/manager.py | 3 ++- 6 files changed, 19 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6927d6774..403b7ab40 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -285,7 +285,9 @@ class CloudController(object): snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) - snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshot = self.volume_api.get_snapshot( + context, + snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) @@ -295,7 +297,8 @@ class CloudController(object): def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') - s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], + 'vol-%08x') s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] @@ -308,7 +311,8 @@ class CloudController(object): return s def create_snapshot(self, context, volume_id, **kwargs): - LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + LOG.audit(_("Create snapshot of volume %s"), volume_id, + context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) snapshot = self.volume_api.create_snapshot( context, @@ -629,7 +633,8 @@ class CloudController(object): else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') != None: - v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], + 'snap-%08x') else: v['snapshotId'] = None diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py index 5a44bac16..f16d6db56 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -48,8 +48,7 @@ snapshots = Table('snapshots', meta, unicode_error=None, _warn_on_bytestring=False)), Column('display_description', String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - ) + unicode_error=None, _warn_on_bytestring=False))) def upgrade(migrate_engine): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index b887c5bad..480f62399 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -353,6 +353,7 @@ class Snapshot(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index c66b66959..3472b1f59 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -193,8 +193,9 @@ class VolumeTestCase(test.TestCase): self.volume.create_volume(self.context, volume_id) snapshot_id = self._create_snapshot(volume_id) self.volume.create_snapshot(self.context, volume_id, snapshot_id) - self.assertEqual(snapshot_id, db.snapshot_get(context.get_admin_context(), 
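
The EC2 layer above reports snapshots with identifiers built from integer database ids and a printf-style template ('snap-%08x', 'vol-%08x'), the same scheme the id_to_ec2_id and ec2_id_to_id calls rely on. The round trip is nothing more than hexadecimal formatting and parsing; a minimal sketch, with helper names that are illustrative rather than imports from nova:

    def to_ec2_id(internal_id, template='i-%08x'):
        """Format an integer database id as an EC2-style identifier."""
        return template % internal_id

    def from_ec2_id(ec2_id):
        """Parse the hexadecimal part back out of an EC2-style identifier."""
        return int(ec2_id.split('-', 1)[1], 16)

    assert to_ec2_id(10, 'snap-%08x') == 'snap-0000000a'
    assert to_ec2_id(255, 'vol-%08x') == 'vol-000000ff'
    assert from_ec2_id('snap-0000000a') == 10
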
- snapshot_id).id) + self.assertEqual(snapshot_id, + db.snapshot_get(context.get_admin_context(), + snapshot_id).id) self.volume.delete_snapshot(self.context, snapshot_id) self.assertRaises(exception.NotFound, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index e0e18b9bf..21cc228c9 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -97,7 +97,7 @@ class VolumeDriver(object): def _copy_volume(self, srcstr, deststr, size_in_g): self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, 'count=%d' % (size_in_g * 1024), 'bs=1M') - + def _volume_not_present(self, volume_name): path_name = '%s/%s' % (FLAGS.volume_group, volume_name) try: @@ -115,7 +115,7 @@ class VolumeDriver(object): self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % (FLAGS.volume_group, self._escape_snapshot(volume['name']))) - + def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' @@ -150,7 +150,7 @@ class VolumeDriver(object): out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): raise exception.VolumeIsBusy(volume_name=volume['name']) - + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index fd889633d..40a104d35 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -169,7 +169,8 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) model_update = self.driver.create_snapshot(snapshot_ref) if model_update: - self.db.snapshot_update(context, snapshot_ref['id'], model_update) + self.db.snapshot_update(context, snapshot_ref['id'], + model_update) except Exception: self.db.snapshot_update(context, -- cgit From c229d6e32f5275b2eb10e760f89a52dc31635c47 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 14:13:17 +0900 Subject: Fix pep8 errors. 
--- nova/api/ec2/cloud.py | 7 ++++--- nova/tests/test_volume.py | 10 ++++++---- nova/volume/api.py | 3 ++- nova/volume/driver.py | 4 ++-- nova/volume/manager.py | 5 +++-- 5 files changed, 17 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index b717a10c0..79cc3b3bf 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -666,14 +666,15 @@ class CloudController(object): return v def create_volume(self, context, **kwargs): - size = kwargs.get('size'); + size = kwargs.get('size') if kwargs.get('snapshot_id') != None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) - LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, + context=context) else: snapshot_id = None LOG.audit(_("Create volume of %s GB"), size, context=context) - + volume = self.volume_api.create( context, size=size, diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 8d58b3135..4f10ee6af 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -78,10 +78,12 @@ class VolumeTestCase(test.TestCase): self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) volume_dst_id = self._create_volume(0, snapshot_id) self.volume.create_volume(self.context, volume_dst_id, snapshot_id) - self.assertEqual(volume_dst_id, db.volume_get(context.get_admin_context(), - volume_dst_id).id) - self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), - volume_dst_id).snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).snapshot_id) self.volume.delete_volume(self.context, volume_dst_id) self.volume.delete_snapshot(self.context, snapshot_id) diff --git a/nova/volume/api.py b/nova/volume/api.py index 7fa80383b..5804955f7 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -43,7 +43,8 @@ class API(base.Base): if snapshot_id != None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": - raise exception.ApiError(_("Snapshot status must be available")) + raise exception.ApiError( + _("Snapshot status must be available")) size = snapshot['volume_size'] if quota.allowed_volumes(context, 1, size) < 1: diff --git a/nova/volume/driver.py b/nova/volume/driver.py index df9767a79..87e13277f 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -674,10 +674,10 @@ class SheepdogDriver(VolumeDriver): def create_volume_from_snapshot(self, volume, snapshot): """Creates a sheepdog volume from a snapshot.""" self._try_execute('qemu-img', 'create', '-b', - "sheepdog:%s:%s" % (snapshot['volume_name'], snapshot['name']), + "sheepdog:%s:%s" % (snapshot['volume_name'], + snapshot['name']), "sheepdog:%s" % volume['name']) - def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 673771aa7..ff53f0701 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -112,8 +112,9 @@ class VolumeManager(manager.SchedulerDependentManager): model_update = self.driver.create_volume(volume_ref) else: snapshot_ref = self.db.snapshot_get(context, snapshot_id) - model_update = self.driver.create_volume_from_snapshot(volume_ref, - snapshot_ref) + model_update = self.driver.create_volume_from_snapshot( + volume_ref, + 
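
Beyond the line-wrapping fixes, the volume API and manager hunks in these commits show the snapshot workflow itself: the API refuses to build a volume from a snapshot whose status is not 'available' and takes the size from the snapshot, while the manager branches between plain create_volume and create_volume_from_snapshot on whether snapshot_id is set. A condensed, self-contained sketch of that control flow, with plain dicts standing in for DB records and driver calls:

    def create_volume(size, snapshot=None):
        """Decide how a new volume is built, mirroring the branching above."""
        if snapshot is not None:
            if snapshot['status'] != 'available':
                raise ValueError('Snapshot status must be available')
            size = snapshot['volume_size']          # size comes from the snapshot
            source = 'from_snapshot:%s' % snapshot['id']
        else:
            source = 'blank'
        return {'size': size, 'source': source, 'status': 'available'}

    snap = {'id': 7, 'status': 'available', 'volume_size': 2}
    assert create_volume(0, snap)['size'] == 2      # size inherited from snapshot
    assert create_volume(1)['source'] == 'blank'
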
snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) -- cgit From ca5a91b3fe6eaa1c2d2b85cb5a11d2bb36e7a436 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 27 May 2011 15:14:16 +0900 Subject: fixed read_only check --- nova/virt/libvirt_conn.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8c9a3550a..7982611fa 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -564,23 +564,27 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" - self._take_action_to_instance('suspend', instance) + dom = self._lookup_by_name(instance.name) + dom.suspend() @exception.wrap_exception def unpause(self, instance, callback): """Unpause paused VM instance""" - self._take_action_to_instance('resume', instance) + dom = self._lookup_by_name(instance.name) + dom.resume() @exception.wrap_exception def suspend(self, instance, callback): """Suspend the specified instance""" - self._take_action_to_instance('managedSave', instance, 0) + dom = self._lookup_by_name(instance.name) + dom.managedSave(0) @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" try: - self._take_action_to_instance('create', instance) + dom = self._lookup_by_name(instance.name) + dom.create() except libvirt.LibvirtError: xml = self.to_xml(instance, None) self._create_new_domain(xml) -- cgit From a92f2bcbbaa40458e81bad3f6cb21288161322f9 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 06:56:50 +0000 Subject: fix calls to openssl properly now. Only append \n to stdin when decoding. Updated the test slightly, also. --- nova/tests/test_xenapi.py | 1 + nova/virt/xenapi/vmops.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 18a267896..3ba37a762 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -595,6 +595,7 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def test_encryption(self): msg = "This is a top-secret message" enc = self.alice.encrypt(msg) + self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, msg) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6d516ddbc..1d8678ce2 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1195,12 +1195,16 @@ class SimpleDH(object): '-nosalt %(dec_flag)s') if which.lower()[0] == 'd': dec_flag = ' -d' + # When decoding base64, we need to make sure there's a + # single '\n' at the end of the base64 encoded data. 
+ # It's kinda dumb that openssl wants to see a newline + text = text.strip('\n') + '\n' else: dec_flag = '' shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) - proc.stdin.write(text + '\n') + proc.stdin.write(text) proc.stdin.close() proc.wait() err = proc.stderr.read() -- cgit From 34bd57c380c348fa9c60cf6b3371352da6e8853c Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 27 May 2011 16:03:56 +0900 Subject: remove _take_action_to_instance --- nova/virt/libvirt_conn.py | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 7982611fa..47a77b3ae 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -548,19 +548,6 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) - def _take_action_to_instance(self, action, instance, *arg): - """action VM instance""" - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance.name) - method = getattr(dom, action) - method(*arg) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance.name) - method = getattr(dom, action) - method(*arg) - @exception.wrap_exception def pause(self, instance, callback): """Pause VM instance""" -- cgit From 107b15d2dd7d554d9cca177343ab45c51029d484 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:15:33 -0400 Subject: fix encryption handling of newlines again and restructure the code a bit --- nova/tests/test_xenapi.py | 22 ++++++++++++++++++---- nova/virt/xenapi/vmops.py | 24 +++++++----------------- 2 files changed, 25 insertions(+), 21 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 3ba37a762..0632d05a5 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -592,12 +592,26 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): bob_shared = self.bob.compute_shared(alice_pub) self.assertEquals(alice_shared, bob_shared) - def test_encryption(self): - msg = "This is a top-secret message" - enc = self.alice.encrypt(msg) + def _test_encryption(self, message): + enc = self.alice.encrypt(message) self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) - self.assertEquals(dec, msg) + self.assertEquals(dec, message) + + def test_encrypt_simple_message(self): + self._test_encryption('This is a simple message.') + + def test_encrypt_message_with_newlines_at_end(self): + self._test_encryption('This message has a newline at the end.\n') + + def test_encrypt_many_newlines_at_end(self): + self._test_encryption('Message with lotsa newlines.\n\n\n') + + def test_encrypt_newlines_inside_message(self): + self._test_encryption('Message\nwith\ninterior\nnewlines.') + + def test_encrypt_with_leading_newlines(self): + self._test_encryption('\n\nMessage with leading newlines.') def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1d8678ce2..ce84c8652 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1190,30 +1190,20 @@ class SimpleDH(object): mpi = M2Crypto.m2.bn_to_mpi(bn) return mpi - def _run_ssl(self, text, which): - base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s ' - '-nosalt %(dec_flag)s') - if which.lower()[0] == 'd': - dec_flag = ' -d' - # When decoding base64, we need to make sure there's a - # single '\n' at the end of the base64 
encoded data. - # It's kinda dumb that openssl wants to see a newline - text = text.strip('\n') + '\n' - else: - dec_flag = '' - shared = self._shared - cmd = base_cmd % locals() - proc = _runproc(cmd) + def _run_ssl(self, subcommand, text): + proc = _runproc('openssl %s' % subcommand) proc.stdin.write(text) proc.stdin.close() proc.wait() err = proc.stderr.read() if err: raise RuntimeError(_('OpenSSL error: %s') % err) - return proc.stdout.read().strip('\n') + return proc.stdout.read() def encrypt(self, text): - return self._run_ssl(text, 'enc') + cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt' % self._shared + return self._run_ssl(cmd, text).strip('\n') def decrypt(self, text): - return self._run_ssl(text, 'dec') + cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt -d' % self._shared + return self._run_ssl(cmd, text) -- cgit From 28b19b9e20100236f98e04cc43bcf106768ff2bb Mon Sep 17 00:00:00 2001 From: "Dave Walker (Daviey)" Date: Fri, 27 May 2011 15:28:10 +0100 Subject: nova/auth/novarc.template: Changed NOVA_KEY_DIR to allow symlink support --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index cda2ecc28..8170fcafe 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,4 @@ -NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null) +NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE})) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From f6d847cd867c09319f9fc451c09dc7322542e26b Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:40:50 -0400 Subject: prevent encryption from adding newlines on long messages --- nova/tests/test_xenapi.py | 4 ++++ nova/virt/xenapi/vmops.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 0632d05a5..fe37f0ebe 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -594,6 +594,7 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def _test_encryption(self, message): enc = self.alice.encrypt(message) + print enc self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, message) @@ -613,6 +614,9 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def test_encrypt_with_leading_newlines(self): self._test_encryption('\n\nMessage with leading newlines.') + def test_encrypt_really_long_message(self): + self._test_encryption(''.join(['abcd' for i in xrange(1024)])) + def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index ce84c8652..1fcaaeede 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1201,7 +1201,7 @@ class SimpleDH(object): return proc.stdout.read() def encrypt(self, text): - cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt' % self._shared + cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt' % self._shared return self._run_ssl(cmd, text).strip('\n') def decrypt(self, text): -- cgit From 60a291747eeded09ade608088eae47fdb300a56b Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:41:12 -0400 Subject: remove errant print statement --- nova/tests/test_xenapi.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index fe37f0ebe..9d56c1644 100644 --- 
a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -594,7 +594,6 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def _test_encryption(self, message): enc = self.alice.encrypt(message) - print enc self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, message) -- cgit From 1af3ac5f60bb9a4ad201f0bd84a355235be2f354 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 19:50:57 +0000 Subject: fixed so all the new encryption tests pass.. including data with newlines and so forth --- nova/virt/xenapi/vmops.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1fcaaeede..e116ef2d1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1190,8 +1190,12 @@ class SimpleDH(object): mpi = M2Crypto.m2.bn_to_mpi(bn) return mpi - def _run_ssl(self, subcommand, text): - proc = _runproc('openssl %s' % subcommand) + def _run_ssl(self, text, extra_args=None): + if not extra_args: + extra_args = '' + cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt %s' % ( + self._shared, extra_args) + proc = _runproc('openssl %s' % cmd) proc.stdin.write(text) proc.stdin.close() proc.wait() @@ -1201,9 +1205,9 @@ class SimpleDH(object): return proc.stdout.read() def encrypt(self, text): - cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt' % self._shared - return self._run_ssl(cmd, text).strip('\n') + return self._run_ssl(text).strip('\n') def decrypt(self, text): - cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt -d' % self._shared - return self._run_ssl(cmd, text) + if text[len(text)-1:] != '\n': + text = text + '\n' + return self._run_ssl(text, '-d') -- cgit From cb42d3ec2c358a1666fde06d4252d1d76baeffff Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 20:29:48 +0000 Subject: added -A back in to pass to openssl --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index e116ef2d1..389c27598 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1193,7 +1193,7 @@ class SimpleDH(object): def _run_ssl(self, text, extra_args=None): if not extra_args: extra_args = '' - cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt %s' % ( + cmd = 'enc -aes-128-cbc -A -a -pass pass:%s -nosalt %s' % ( self._shared, extra_args) proc = _runproc('openssl %s' % cmd) proc.stdin.write(text) -- cgit From 132d0579a11b5f3b0be930e5a9369205cb282e35 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 20:48:57 +0000 Subject: added \n is not needed with -A --- nova/virt/xenapi/vmops.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 389c27598..2b3fb6a39 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1208,6 +1208,4 @@ class SimpleDH(object): return self._run_ssl(text).strip('\n') def decrypt(self, text): - if text[len(text)-1:] != '\n': - text = text + '\n' return self._run_ssl(text, '-d') -- cgit From a9278909cbb6d5ea9283231dbd6efc67b812abff Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 23:10:42 -0400 Subject: Update the rebuild_instance function in the compute manager so that it accepts the arguments that our current compute API sends. 
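
The SimpleDH back-and-forth above (appending '\n' before decoding, then dropping it once single-line base64 via -A is used) comes down to how text is piped through the openssl CLI. Below is a self-contained sketch of that pipe using subprocess; the cipher, -a/-A, -nosalt and -pass arguments are the ones that appear in the diffs, the passphrase is a dummy value, and it assumes an openssl binary on the PATH.

    import subprocess

    def run_openssl(text, shared_key, decrypt=False):
        """Pipe text through `openssl enc`, the way the SimpleDH helper does."""
        cmd = ['openssl', 'enc', '-aes-128-cbc', '-A', '-a',
               '-pass', 'pass:%s' % shared_key, '-nosalt']
        if decrypt:
            cmd.append('-d')
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate(text.encode())
        if proc.returncode != 0:
            raise RuntimeError('OpenSSL error: %s' % err.decode())
        return out.decode()

    key = 'not-a-real-shared-secret'
    message = 'Message\nwith\ninterior\nnewlines.'
    encrypted = run_openssl(message, key).strip('\n')
    assert run_openssl(encrypted, key, decrypt=True) == message
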
--- nova/compute/manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d1e01f275..3897b3a9e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -331,7 +331,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock - def rebuild_instance(self, context, instance_id, image_id): + def rebuild_instance(self, context, instance_id, **kwargs): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and @@ -349,7 +349,8 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) self.driver.destroy(instance_ref) - instance_ref.image_id = image_id + instance_ref.image_id = kwargs.get('image_id') + instance_ref.injected_files = kwargs.get('injected_files', []) self.driver.spawn(instance_ref) self._update_image_id(context, instance_id, image_id) -- cgit From ccf522daaca0d4136c072c1905dd9fbaa1dfb2e9 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 23:12:07 -0400 Subject: Fixes to the SQLAlchmeny API such that metadata is saved on an instance_update. Added integration test to test that instance metadata is updated on a rebuild. --- nova/db/sqlalchemy/api.py | 22 +++++++++++++--------- nova/tests/integrated/api/client.py | 10 ++++++++-- nova/tests/integrated/test_servers.py | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 11 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e4dda5c12..1a7cae6e9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -771,6 +771,15 @@ def fixed_ip_update(context, address, values): ################### +def _metadata_refs(metadata_dict): + metadata_refs = [] + if metadata_dict: + for k, v in metadata_dict.iteritems(): + metadata_ref = models.InstanceMetadata() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs @require_context @@ -780,15 +789,7 @@ def instance_create(context, values): context - request context object values - dict containing column values. 
""" - metadata = values.get('metadata') - metadata_refs = [] - if metadata: - for k, v in metadata.iteritems(): - metadata_ref = models.InstanceMetadata() - metadata_ref['key'] = k - metadata_ref['value'] = v - metadata_refs.append(metadata_ref) - values['metadata'] = metadata_refs + values['metadata'] = _metadata_refs(values.get('metadata')) instance_ref = models.Instance() instance_ref.update(values) @@ -1010,6 +1011,9 @@ def instance_set_state(context, instance_id, state, description=None): @require_context def instance_update(context, instance_id, values): session = get_session() + metadata = values.get('metadata') + if metadata: + values['metadata'] = _metadata_refs(values.get('metadata')) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index 7e20c9b00..eb9a3056e 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -152,7 +152,10 @@ class TestOpenStackClient(object): def _decode_json(self, response): body = response.read() LOG.debug(_("Decoding JSON: %s") % (body)) - return json.loads(body) + if body: + return json.loads(body) + else: + return "" def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) @@ -166,7 +169,7 @@ class TestOpenStackClient(object): headers['Content-Type'] = 'application/json' kwargs['body'] = json.dumps(body) - kwargs.setdefault('check_response_status', [200]) + kwargs.setdefault('check_response_status', [200, 202]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) @@ -185,6 +188,9 @@ class TestOpenStackClient(object): def post_server(self, server): return self.api_post('/servers', server)['server'] + def post_server_action(self, server_id, data): + return self.api_post('/servers/%s/action' % server_id, data) + def delete_server(self, server_id): return self.api_delete('/servers/%s' % server_id) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index e89d0100a..604faf59f 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -179,6 +179,40 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Cleanup self._delete_server(created_server_id) + def test_create_and_rebuild_server_with_metadata(self): + """Rebuild a server with metadata.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + post['rebuild']['metadata'] = metadata + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, found_server.get('metadata')) + + # Cleanup + self._delete_server(created_server_id) if __name__ == "__main__": unittest.main() -- cgit From 833481d796db557dddde6b4b9e75b7cf518b88fa Mon Sep 17 
00:00:00 2001 From: Dan Prince Date: Sun, 29 May 2011 07:51:44 -0400 Subject: Use metadata variable when calling _metadata_refs. --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1a7cae6e9..a678ebedd 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1013,7 +1013,7 @@ def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') if metadata: - values['metadata'] = _metadata_refs(values.get('metadata')) + values['metadata'] = _metadata_refs(metadata) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) -- cgit From 45818393a20a56d5e0aab23f3c78e430e0c1167a Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 30 May 2011 14:17:00 +0900 Subject: fixed nova.virt.libvirt_conn.resume() method - removing try-catch --- nova/virt/libvirt_conn.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 47a77b3ae..32f374955 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -569,12 +569,8 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def resume(self, instance, callback): """resume the specified instance""" - try: - dom = self._lookup_by_name(instance.name) - dom.create() - except libvirt.LibvirtError: - xml = self.to_xml(instance, None) - self._create_new_domain(xml) + dom = self._lookup_by_name(instance.name) + dom.create() @exception.wrap_exception def rescue(self, instance): -- cgit From 2155f2b1ab22c6183ab5266e16a675f1469fca50 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 30 May 2011 11:29:55 -0400 Subject: Updates so that 'name' can be updated when doing a OS API v1.1 rebuild. Fixed issue where metadata wasn't getting deleted when an empty dict was POST'd on a rebuild. 
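Editor's note: the essential change in this commit is that the compute API now distinguishes "argument omitted" (None) from "argument supplied but empty" ({}). Only fields the caller actually sent are written, and an empty metadata dict posted on rebuild now purges existing metadata instead of being ignored. The following is a condensed sketch of that values-building logic, not the full compute API; the metadata/display_name keys mirror the patch and the rest is a toy.

# Condensed illustration of the `is not None` checks introduced below.
def build_rebuild_updates(name=None, metadata=None):
    values = {}
    if metadata is not None:        # {} is falsy, but still means "replace"
        values['metadata'] = metadata
    if name is not None:
        values['display_name'] = name
    return values

print(build_rebuild_updates())                        # {} -> nothing updated
print(build_rebuild_updates(metadata={}))             # metadata gets purged
print(build_rebuild_updates(name='blah',
                            metadata={'key_1': 'value_1'}))
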
--- nova/api/openstack/servers.py | 10 +++-- nova/compute/api.py | 13 ++++--- nova/db/sqlalchemy/api.py | 17 +++++++-- nova/tests/integrated/test_servers.py | 72 +++++++++++++++++++++++++++++++++++ 4 files changed, 100 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 5c10fc916..8e191c232 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -708,14 +708,16 @@ class ControllerV11(Controller): image_id = common.get_id_from_href(image_ref) personalities = info["rebuild"].get("personality", []) - metadata = info["rebuild"].get("metadata", {}) + metadata = info["rebuild"].get("metadata") + name = info["rebuild"].get("name") - self._validate_metadata(metadata) + if metadata: + self._validate_metadata(metadata) self._decode_personalities(personalities) try: - self.compute_api.rebuild(context, instance_id, image_id, metadata, - personalities) + self.compute_api.rebuild(context, instance_id, image_id, name, + metadata, personalities) except exception.BuildInProgress: msg = _("Instance %d is currently being rebuilt.") % instance_id LOG.debug(msg) diff --git a/nova/compute/api.py b/nova/compute/api.py index 4f2363387..151679521 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -530,7 +530,7 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) - def rebuild(self, context, instance_id, image_id, metadata=None, + def rebuild(self, context, instance_id, image_id, name=None, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -539,13 +539,16 @@ class API(base.Base): msg = _("Instance already building") raise exception.BuildInProgress(msg) - metadata = metadata or {} - self._check_metadata_properties_quota(context, metadata) - files_to_inject = files_to_inject or [] self._check_injected_file_quota(context, files_to_inject) - self.db.instance_update(context, instance_id, {"metadata": metadata}) + values = {} + if metadata is not None: + self._check_metadata_properties_quota(context, metadata) + values['metadata'] = metadata + if name is not None: + values['display_name'] = name + self.db.instance_update(context, instance_id, values) rebuild_params = { "image_id": image_id, diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a678ebedd..ea84e96e7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1012,8 +1012,9 @@ def instance_set_state(context, instance_id, state, description=None): def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') - if metadata: - values['metadata'] = _metadata_refs(metadata) + if metadata is not None: + instance_metadata_update_or_create(context, instance_id, + values.pop('metadata'), True) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) @@ -2570,8 +2571,12 @@ def instance_metadata_get_item(context, instance_id, key): @require_context -def instance_metadata_update_or_create(context, instance_id, metadata): +def instance_metadata_update_or_create(context, instance_id, metadata, + purge=False): session = get_session() + + original_metadata = instance_metadata_get(context, instance_id) + meta_ref = None for key, value in metadata.iteritems(): try: @@ -2583,4 +2588,10 @@ def instance_metadata_update_or_create(context, instance_id, 
metadata): "instance_id": instance_id, "deleted": 0}) meta_ref.save(session=session) + + if purge: + for key in original_metadata.keys(): + if not key in metadata.keys(): + instance_metadata_delete(context, instance_id, key) + return metadata diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 604faf59f..a67fa1bb5 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -179,6 +179,36 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Cleanup self._delete_server(created_server_id) + def test_create_and_rebuild_server(self): + """Rebuild a server.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual({}, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + def test_create_and_rebuild_server_with_metadata(self): """Rebuild a server with metadata.""" @@ -210,9 +240,51 @@ class ServersTest(integrated_helpers._IntegratedTestBase): found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) self.assertEqual(metadata, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + + def test_create_and_rebuild_server_with_metadata_removal(self): + """Rebuild a server with metadata.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + server_post['server']['metadata'] = metadata + + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah" + } + + metadata = {} + post['rebuild']['metadata'] = metadata + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) # Cleanup self._delete_server(created_server_id) + if __name__ == "__main__": unittest.main() -- cgit From be9113bc5c08cbafb7af9f83bd61f318d1ba6145 Mon Sep 17 00:00:00 2001 From: "Vivek YS vivek.ys@gmail.com" <> Date: Tue, 31 May 2011 09:49:06 +0530 Subject: Fixed the typo of APIError with ApiError --- nova/virt/vmwareapi/vmops.py | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) (limited to 'nova') diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index c3e79a92f..6d7149841 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -590,11 +590,11 @@ class VMWareVMOps(object): def pause(self, instance, callback): """Pause a VM instance.""" - raise exception.APIError("pause not supported for vmwareapi") + raise exception.ApiError("pause not supported for vmwareapi") def unpause(self, instance, callback): """Un-Pause a VM instance.""" - raise exception.APIError("unpause not supported for vmwareapi") + raise exception.ApiError("unpause not supported for vmwareapi") def suspend(self, instance, callback): """Suspend the specified instance.""" @@ -673,7 +673,7 @@ class VMWareVMOps(object): def get_diagnostics(self, instance): """Return data about VM diagnostics.""" - raise exception.APIError("get_diagnostics not implemented for " + raise exception.ApiError("get_diagnostics not implemented for " "vmwareapi") def get_console_output(self, instance): -- cgit From 7beafb1aafac97e6dfc28108062785465cc8f577 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 31 May 2011 14:38:12 -0400 Subject: Use a new instance_metadata_delete_all DB api call to delete existing metadata when updating a server. --- nova/db/sqlalchemy/api.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ea84e96e7..8df96cbf4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1013,8 +1013,9 @@ def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: + instance_metadata_delete_all(context, instance_id) instance_metadata_update_or_create(context, instance_id, - values.pop('metadata'), True) + values.pop('metadata')) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) @@ -2554,6 +2555,17 @@ def instance_metadata_delete(context, instance_id, key): 'updated_at': literal_column('updated_at')}) +@require_context +def instance_metadata_delete_all(context, instance_id): + session = get_session() + session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + @require_context def instance_metadata_get_item(context, instance_id, key): session = get_session() @@ -2571,8 +2583,7 @@ def instance_metadata_get_item(context, instance_id, key): @require_context -def instance_metadata_update_or_create(context, instance_id, metadata, - purge=False): +def instance_metadata_update_or_create(context, instance_id, metadata): session = get_session() original_metadata = instance_metadata_get(context, instance_id) @@ -2589,9 +2600,4 @@ def instance_metadata_update_or_create(context, instance_id, metadata, "deleted": 0}) meta_ref.save(session=session) - if purge: - for key in original_metadata.keys(): - if not key in metadata.keys(): - instance_metadata_delete(context, instance_id, key) - return metadata -- cgit
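Editor's note: taken together, the last two commits move metadata replacement out of instance_metadata_update_or_create and into instance_update, which now soft-deletes every existing row via instance_metadata_delete_all and then recreates the supplied key/value pairs. The sketch below models that flow with an in-memory list rather than the real SQLAlchemy layer; the two function names mirror the patch, while the row layout and the driver code at the bottom are assumptions made purely for illustration.

# In-memory sketch of the delete-all-then-recreate flow -- not the real
# SQLAlchemy API. Function names mirror the patch; the row dicts are made up.
import datetime

def instance_metadata_delete_all(rows, instance_id):
    # Soft-delete every live row for the instance, like the new DB call.
    for row in rows:
        if row['instance_id'] == instance_id and not row['deleted']:
            row['deleted'] = True
            row['deleted_at'] = datetime.datetime.utcnow()

def instance_metadata_update_or_create(rows, instance_id, metadata):
    # Create or update a row per key; previously-deleted rows stay deleted.
    live = dict((r['key'], r) for r in rows
                if r['instance_id'] == instance_id and not r['deleted'])
    for key, value in metadata.items():
        row = live.get(key)
        if row is None:
            row = {'instance_id': instance_id, 'key': key,
                   'deleted': False, 'deleted_at': None}
            rows.append(row)
        row['value'] = value
    return metadata

rows = []
instance_metadata_update_or_create(rows, 1, {'key_0': 'value_0'})
# What instance_update now does whenever values carry a metadata dict:
instance_metadata_delete_all(rows, 1)
instance_metadata_update_or_create(rows, 1, {})    # empty dict -> nothing live
print([r['key'] for r in rows if not r['deleted']])    # -> []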