Diffstat (limited to 'nova')
-rw-r--r--  nova/api/openstack/__init__.py               |  4
-rw-r--r--  nova/api/openstack/backup_schedules.py       | 15
-rw-r--r--  nova/api/openstack/images.py                 | 85
-rw-r--r--  nova/api/openstack/ratelimiting/__init__.py  |  4
-rw-r--r--  nova/api/openstack/servers.py                | 62
-rw-r--r--  nova/api/openstack/sharedipgroups.py         | 39
-rw-r--r--  nova/compute/api.py                          | 29
-rw-r--r--  nova/compute/manager.py                      | 33
-rw-r--r--  nova/compute/power_state.py                  |  4
-rw-r--r--  nova/tests/api/openstack/test_images.py      | 18
-rw-r--r--  nova/tests/api/openstack/test_servers.py     | 32
-rw-r--r--  nova/tests/test_compute.py                   | 12
-rw-r--r--  nova/tests/test_virt.py                      |  2
-rw-r--r--  nova/tests/test_xenapi.py                    |  1
-rw-r--r--  nova/tests/xenapi/stubs.py                   | 13
-rw-r--r--  nova/utils.py                                |  3
-rw-r--r--  nova/virt/fake.py                            | 12
-rw-r--r--  nova/virt/libvirt_conn.py                    | 37
-rw-r--r--  nova/virt/xenapi/fake.py                     |  1
-rw-r--r--  nova/virt/xenapi/vm_utils.py                 | 88
-rw-r--r--  nova/virt/xenapi/vmops.py                    | 51
-rw-r--r--  nova/virt/xenapi/volume_utils.py             |  6
-rw-r--r--  nova/virt/xenapi_conn.py                     |  8
-rw-r--r--  nova/wsgi.py                                 |  2
24 files changed, 463 insertions(+), 98 deletions(-)
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index c49399f28..66aceee2d 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -93,12 +93,14 @@ class APIRouter(wsgi.Router):
             logging.debug("Including admin operations in API.")
             server_members['pause'] = 'POST'
             server_members['unpause'] = 'POST'
+            server_members['suspend'] = 'POST'
+            server_members['resume'] = 'POST'
 
         mapper.resource("server", "servers", controller=servers.Controller(),
                         collection={'detail': 'GET'},
                         member=server_members)
 
-        mapper.resource("backup_schedule", "backup_schedules",
+        mapper.resource("backup_schedule", "backup_schedule",
                         controller=backup_schedules.Controller(),
                         parent_resource=dict(member_name='server',
                                              collection_name='servers'))
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index fc70b5c6c..fcc07bdd3 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -23,13 +23,25 @@ from nova.api.openstack import faults
 import nova.image.service
 
 
+def _translate_keys(inst):
+    """ Coerces the backup schedule into proper dictionary format """
+    return dict(backupSchedule=inst)
+
+
 class Controller(wsgi.Controller):
+    """ The backup schedule API controller for the Openstack API """
+
+    _serialization_metadata = {
+        'application/xml': {
+            'attributes': {
+                'backupSchedule': []}}}
+
     def __init__(self):
         pass
 
     def index(self, req, server_id):
-        return faults.Fault(exc.HTTPNotFound())
+        """ Returns the list of backup schedules for a given instance """
+        return _translate_keys({})
 
     def create(self, req, server_id):
         """ No actual update method required, since the existing API allows
@@ -37,4 +49,5 @@ class Controller(wsgi.Controller):
         return faults.Fault(exc.HTTPNotFound())
 
     def delete(self, req, server_id, id):
+        """ Deletes an existing backup schedule """
         return faults.Fault(exc.HTTPNotFound())
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index d3312aba8..ba35fbc78 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -30,6 +30,65 @@ from nova.api.openstack import faults
 
 FLAGS = flags.FLAGS
 
 
+def _translate_keys(item):
+    """
+    Maps key names to Rackspace-like attributes for return
+    also pares down attributes to those we want
+    item is a dict
+
+    Note: should be removed when the set of keys expected by the api
+    and the set of keys returned by the image service are equivalent
+
+    """
+    # TODO(tr3buchet): this map is specific to s3 object store,
+    # replace with a list of keys for _filter_keys later
+    mapped_keys = {'status': 'imageState',
+                   'id': 'imageId',
+                   'name': 'imageLocation'}
+
+    mapped_item = {}
+    # TODO(tr3buchet):
+    # this chunk of code works with s3 and the local image service/glance
+    # when we switch to glance/local image service it can be replaced with
+    # a call to _filter_keys, and mapped_keys can be changed to a list
+    try:
+        for k, v in mapped_keys.iteritems():
+            # map s3 fields
+            mapped_item[k] = item[v]
+    except KeyError:
+        # return only the fields api expects
+        mapped_item = _filter_keys(item, mapped_keys.keys())
+
+    return mapped_item
+
+
+def _translate_status(item):
+    """
+    Translates status of image to match current Rackspace api bindings
+    item is a dict
+
+    Note: should be removed when the set of statuses expected by the api
+    and the set of statuses returned by the image service are equivalent
+
+    """
+    status_mapping = {
+        'pending': 'queued',
+        'decrypting': 'preparing',
+        'untarring': 'saving',
+        'available': 'active'}
+    item['status'] = status_mapping[item['status']]
+    return item
+
+
+def _filter_keys(item, keys):
+    """
+    Filters all model attributes except for keys
+    item is a dict
+
+    """
+    return dict((k, v) for k, v in item.iteritems() if k in keys)
+
+
 class Controller(wsgi.Controller):
 
     _serialization_metadata = {
@@ -42,25 +101,25 @@ class Controller(wsgi.Controller):
         self._service = utils.import_object(FLAGS.image_service)
 
     def index(self, req):
-        """Return all public images in brief."""
-        return dict(images=[dict(id=img['id'], name=img['name'])
-                            for img in self.detail(req)['images']])
+        """Return all public images in brief"""
+        items = self._service.index(req.environ['nova.context'])
+        items = common.limited(items, req)
+        items = [_filter_keys(item, ('id', 'name')) for item in items]
+        return dict(images=items)
 
     def detail(self, req):
-        """Return all public images in detail."""
+        """Return all public images in detail"""
         try:
-            images = self._service.detail(req.environ['nova.context'])
-            images = common.limited(images, req)
+            items = self._service.detail(req.environ['nova.context'])
         except NotImplementedError:
-            # Emulate detail() using repeated calls to show()
-            ctxt = req.environ['nova.context']
-            images = self._service.index(ctxt)
-            images = common.limited(images, req)
-            images = [self._service.show(ctxt, i['id']) for i in images]
-        return dict(images=images)
+            items = self._service.index(req.environ['nova.context'])
+        items = common.limited(items, req)
+        items = [_translate_keys(item) for item in items]
+        items = [_translate_status(item) for item in items]
+        return dict(images=items)
 
     def show(self, req, id):
-        """Return data about the given image id."""
+        """Return data about the given image id"""
         return dict(image=self._service.show(req.environ['nova.context'], id))
 
     def delete(self, req, id):
diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index 91a8b2e55..cbb4b897e 100644
--- a/nova/api/openstack/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
@@ -64,9 +64,9 @@ class RateLimitingMiddleware(wsgi.Middleware):
         If the request should be rate limited, return a 413 status with a
         Retry-After header giving the time when the request would succeed.
         """
-        return self.limited_request(req, self.application)
+        return self.rate_limited_request(req, self.application)
 
-    def limited_request(self, req, application):
+    def rate_limited_request(self, req, application):
         """Rate limit the request.
 
         If the request should be rate limited, return a 413 status with a
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 8d60e2cab..845183258 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -35,18 +35,16 @@ LOG = logging.getLogger('server')
 LOG.setLevel(logging.DEBUG)
 
 
-def _entity_list(entities):
-    """ Coerces a list of servers into proper dictionary format """
-    return dict(servers=entities)
-
-
-def _entity_detail(inst):
-    """ Maps everything to Rackspace-like attributes for return"""
+def _translate_detail_keys(inst):
+    """ Coerces into dictionary format, mapping everything to Rackspace-like
+        attributes for return"""
     power_mapping = {
+        None: 'build',
         power_state.NOSTATE: 'build',
         power_state.RUNNING: 'active',
         power_state.BLOCKED: 'active',
-        power_state.PAUSED: 'suspended',
+        power_state.SUSPENDED: 'suspended',
+        power_state.PAUSED: 'error',
         power_state.SHUTDOWN: 'active',
         power_state.SHUTOFF: 'active',
         power_state.CRASHED: 'error'}
@@ -66,8 +64,9 @@ def _entity_detail(inst):
     return dict(server=inst_dict)
 
 
-def _entity_inst(inst):
-    """ Filters all model attributes save for id and name """
+def _translate_keys(inst):
+    """ Coerces into dictionary format, excluding all model attributes
+        save for id and name """
     return dict(server=dict(id=inst['internal_id'],
                             name=inst['display_name']))
 
@@ -86,29 +85,29 @@ class Controller(wsgi.Controller):
 
     def index(self, req):
         """ Returns a list of server names and ids for a given user """
-        return self._items(req, entity_maker=_entity_inst)
+        return self._items(req, entity_maker=_translate_keys)
 
     def detail(self, req):
         """ Returns a list of server details for a given user """
-        return self._items(req, entity_maker=_entity_detail)
+        return self._items(req, entity_maker=_translate_detail_keys)
 
     def _items(self, req, entity_maker):
         """Returns a list of servers for a given user.
 
-        entity_maker - either _entity_detail or _entity_inst
+        entity_maker - either _translate_detail_keys or _translate_keys
         """
         instance_list = self.compute_api.get_instances(
             req.environ['nova.context'])
         limited_list = common.limited(instance_list, req)
         res = [entity_maker(inst)['server'] for inst in limited_list]
-        return _entity_list(res)
+        return dict(servers=res)
 
     def show(self, req, id):
         """ Returns server details by server id """
         try:
             instance = self.compute_api.get_instance(
                 req.environ['nova.context'], int(id))
-            return _entity_detail(instance)
+            return _translate_detail_keys(instance)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
 
@@ -137,7 +136,7 @@ class Controller(wsgi.Controller):
             description=env['server']['name'],
             key_name=key_pair['name'],
             key_data=key_pair['public_key'])
-        return _entity_inst(instances[0])
+        return _translate_keys(instances[0])
 
     def update(self, req, id):
         """ Updates the server name or password """
@@ -152,8 +151,9 @@ class Controller(wsgi.Controller):
             update_dict['display_name'] = inst_dict['server']['name']
 
         try:
-            self.compute_api.update_instance(req.environ['nova.context'],
-                                             instance['id'],
+            ctxt = req.environ['nova.context']
+            self.compute_api.update_instance(ctxt,
+                                             id,
                                              **update_dict)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
@@ -182,7 +182,7 @@ class Controller(wsgi.Controller):
             self.compute_api.pause(ctxt, id)
         except:
             readable = traceback.format_exc()
-            logging.error("Compute.api::pause %s", readable)
+            logging.error(_("Compute.api::pause %s"), readable)
             return faults.Fault(exc.HTTPUnprocessableEntity())
         return exc.HTTPAccepted()
 
@@ -193,6 +193,28 @@ class Controller(wsgi.Controller):
             self.compute_api.unpause(ctxt, id)
         except:
             readable = traceback.format_exc()
-            logging.error("Compute.api::unpause %s", readable)
+            logging.error(_("Compute.api::unpause %s"), readable)
+            return faults.Fault(exc.HTTPUnprocessableEntity())
+        return exc.HTTPAccepted()
+
+    def suspend(self, req, id):
+        """permit admins to suspend the server"""
+        context = req.environ['nova.context']
+        try:
+            self.compute_api.suspend(context, id)
+        except:
+            readable = traceback.format_exc()
+            logging.error(_("compute.api::suspend %s"), readable)
+            return faults.Fault(exc.HTTPUnprocessableEntity())
+        return exc.HTTPAccepted()
+
+    def resume(self, req, id):
+        """permit admins to resume the server from suspend"""
+        context = req.environ['nova.context']
+        try:
+            self.compute_api.resume(context, id)
+        except:
+            readable = traceback.format_exc()
+            logging.error(_("compute.api::resume %s"), readable)
            return faults.Fault(exc.HTTPUnprocessableEntity())
         return exc.HTTPAccepted()
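
A rough usage sketch, not part of the change itself: the suspend and resume entries added to server_members in nova/api/openstack/__init__.py expose admin-only POST actions at /v1.0/servers/<id>/suspend and /v1.0/servers/<id>/resume, handled by Controller.suspend() and Controller.resume() above. Assuming the admin API is enabled (FLAGS.allow_admin_api) and a server with id 1 exists, they could be exercised the same way the tests later in this diff do:

import json
import webob
import nova.api

req = webob.Request.blank('/v1.0/servers/1/suspend')  # or .../resume
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps({})           # the action does not inspect the body
res = req.get_response(nova.api.API('os'))
assert res.status_int == 202        # accepted; suspend runs asynchronously
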
diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/sharedipgroups.py
index 75d02905c..845f5bead 100644
--- a/nova/api/openstack/sharedipgroups.py
+++ b/nova/api/openstack/sharedipgroups.py
@@ -15,26 +15,51 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from webob import exc
+
 from nova import wsgi
+from nova.api.openstack import faults
+
+
+def _translate_keys(inst):
+    """ Coerces a shared IP group instance into proper dictionary format """
+    return dict(sharedIpGroup=inst)
+
+
+def _translate_detail_keys(inst):
+    """ Coerces a shared IP group instance into proper dictionary format with
+    correctly mapped attributes """
+    return dict(sharedIpGroup=inst)
 
 
 class Controller(wsgi.Controller):
     """ The Shared IP Groups Controller for the Openstack API """
 
+    _serialization_metadata = {
+        'application/xml': {
+            'attributes': {
+                'sharedIpGroup': []}}}
+
     def index(self, req):
-        raise NotImplementedError
+        """ Returns a list of Shared IP Groups for the user """
+        return dict(sharedIpGroups=[])
 
     def show(self, req, id):
-        raise NotImplementedError
+        """ Shows in-depth information on a specific Shared IP Group """
+        return _translate_keys({})
 
     def update(self, req, id):
-        raise NotImplementedError
+        """ You can't update a Shared IP Group """
+        raise faults.Fault(exc.HTTPNotImplemented())
 
     def delete(self, req, id):
-        raise NotImplementedError
+        """ Deletes a Shared IP Group """
+        raise faults.Fault(exc.HTTPNotFound())
 
-    def detail(self, req):
-        raise NotImplementedError
+    def detail(self, req, id):
+        """ Returns a complete list of Shared IP Groups """
+        return _translate_detail_keys({})
 
     def create(self, req):
-        raise NotImplementedError
+        """ Creates a new Shared IP group """
+        raise faults.Fault(exc.HTTPNotFound())
diff --git a/nova/compute/api.py b/nova/compute/api.py
index cc377a1e4..789514967 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,15 +92,16 @@ class ComputeAPI(base.Base):
         is_vpn = image_id == FLAGS.vpn_image_id
         if not is_vpn:
             image = self.image_service.show(context, image_id)
-
-            # If kernel_id/ramdisk_id isn't explicitly set in API call
-            # we take the defaults from the image's metadata
             if kernel_id is None:
                 kernel_id = image.get('kernelId', None)
             if ramdisk_id is None:
                 ramdisk_id = image.get('ramdiskId', None)
-
-        # Make sure we have access to kernel and ramdisk
+            #No kernel and ramdisk for raw images
+            if kernel_id == str(FLAGS.null_kernel):
+                kernel_id = None
+                ramdisk_id = None
+                logging.debug("Creating a raw instance")
+        # Make sure we have access to kernel and ramdisk (if not raw)
         if kernel_id:
             self.image_service.show(context, kernel_id)
         if ramdisk_id:
@@ -285,6 +286,24 @@ class ComputeAPI(base.Base):
                      {"method": "unpause_instance",
                       "args": {"instance_id": instance['id']}})
 
+    def suspend(self, context, instance_id):
+        """suspend the instance with instance_id"""
+        instance = self.db.instance_get_by_internal_id(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "suspend_instance",
+                  "args": {"instance_id": instance['id']}})
+
+    def resume(self, context, instance_id):
+        """resume the instance with instance_id"""
+        instance = self.db.instance_get_by_internal_id(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "resume_instance",
+                  "args": {"instance_id": instance['id']}})
+
     def rescue(self, context, instance_id):
         """Rescue the given instance."""
         instance = self.db.instance_get_by_internal_id(context, instance_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index ff8202cca..70b175e7c 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -297,6 +297,39 @@ class ComputeManager(manager.Manager):
                                                            result))
 
     @exception.wrap_exception
+    def suspend_instance(self, context, instance_id):
+        """suspend the instance with instance_id"""
+        context = context.elevated()
+        instance_ref = self.db.instance_get(context, instance_id)
+
+        logging.debug(_('instance %s: suspending'),
+                      instance_ref['internal_id'])
+        self.db.instance_set_state(context, instance_id,
+                                   power_state.NOSTATE,
+                                   'suspending')
+        self.driver.suspend(instance_ref,
+            lambda result: self._update_state_callback(self,
+                                                       context,
+                                                       instance_id,
+                                                       result))
+
+    @exception.wrap_exception
+    def resume_instance(self, context, instance_id):
+        """resume the suspended instance with instance_id"""
+        context = context.elevated()
+        instance_ref = self.db.instance_get(context, instance_id)
+
+        logging.debug(_('instance %s: resuming'), instance_ref['internal_id'])
+        self.db.instance_set_state(context, instance_id,
+                                   power_state.NOSTATE,
+                                   'resuming')
+        self.driver.resume(instance_ref,
+            lambda result: self._update_state_callback(self,
+                                                       context,
+                                                       instance_id,
+                                                       result))
+
+    @exception.wrap_exception
     def get_console_output(self, context, instance_id):
         """Send the console output for an instance."""
         context = context.elevated()
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index cefdf2d9e..37039d2ec 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -26,6 +26,7 @@ PAUSED = 0x03
 SHUTDOWN = 0x04
 SHUTOFF = 0x05
 CRASHED = 0x06
+SUSPENDED = 0x07
 
 
 def name(code):
@@ -36,5 +37,6 @@ def name(code):
          PAUSED: 'paused',
          SHUTDOWN: 'shutdown',
          SHUTOFF: 'shutdown',
-         CRASHED: 'crashed'}
+         CRASHED: 'crashed',
+         SUSPENDED: 'suspended'}
     return d[code]
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index f610cbf9c..1b4031217 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -223,6 +223,20 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
         res = req.get_response(nova.api.API('os'))
         res_dict = json.loads(res.body)
 
+        def _is_equivalent_subset(x, y):
+            if set(x) <= set(y):
+                for k, v in x.iteritems():
+                    if x[k] != y[k]:
+                        if x[k] == 'active' and y[k] == 'available':
+                            continue
+                        return False
+                return True
+            return False
+
         for image in res_dict['images']:
-            self.assertEquals(1, self.IMAGE_FIXTURES.count(image),
-                              "image %s not in fixtures!" % str(image))
+            for image_fixture in self.IMAGE_FIXTURES:
+                if _is_equivalent_subset(image, image_fixture):
+                    break
+            else:
+                self.assertEquals(1, 2, "image %s not in fixtures!" %
+                                  str(image))
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 3820f5f27..5d23db588 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -88,9 +88,13 @@ class ServersTest(unittest.TestCase):
         self.stubs.Set(nova.db.api, 'instance_get_floating_address',
                        instance_address)
         self.stubs.Set(nova.compute.api.ComputeAPI, 'pause',
-            fake_compute_api)
+                       fake_compute_api)
         self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause',
-            fake_compute_api)
+                       fake_compute_api)
+        self.stubs.Set(nova.compute.api.ComputeAPI, 'suspend',
+                       fake_compute_api)
+        self.stubs.Set(nova.compute.api.ComputeAPI, 'resume',
+                       fake_compute_api)
         self.allow_admin = FLAGS.allow_admin_api
 
     def tearDown(self):
@@ -246,6 +250,30 @@ class ServersTest(unittest.TestCase):
         res = req.get_response(nova.api.API('os'))
         self.assertEqual(res.status_int, 202)
 
+    def test_server_suspend(self):
+        FLAGS.allow_admin_api = True
+        body = dict(server=dict(
+            name='server_test', imageId=2, flavorId=2, metadata={},
+            personality={}))
+        req = webob.Request.blank('/v1.0/servers/1/suspend')
+        req.method = 'POST'
+        req.content_type = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(nova.api.API('os'))
+        self.assertEqual(res.status_int, 202)
+
+    def test_server_resume(self):
+        FLAGS.allow_admin_api = True
+        body = dict(server=dict(
+            name='server_test', imageId=2, flavorId=2, metadata={},
+            personality={}))
+        req = webob.Request.blank('/v1.0/servers/1/resume')
+        req.method = 'POST'
+        req.content_type = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(nova.api.API('os'))
+        self.assertEqual(res.status_int, 202)
+
     def test_server_reboot(self):
         body = dict(server=dict(
             name='server_test', imageId=2, flavorId=2, metadata={},
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 348bb3351..bcb8a1526 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase):
         self.compute.run_instance(self.context, instance_id)
 
         instances = db.instance_get_all(context.get_admin_context())
-        logging.info("Running instances: %s", instances)
+        logging.info(_("Running instances: %s"), instances)
         self.assertEqual(len(instances), 1)
 
         self.compute.terminate_instance(self.context, instance_id)
 
         instances = db.instance_get_all(context.get_admin_context())
-        logging.info("After terminating instances: %s", instances)
+        logging.info(_("After terminating instances: %s"), instances)
         self.assertEqual(len(instances), 0)
 
     def test_run_terminate_timestamps(self):
@@ -136,6 +136,14 @@ class ComputeTestCase(test.TestCase):
         self.compute.unpause_instance(self.context, instance_id)
         self.compute.terminate_instance(self.context, instance_id)
 
+    def test_suspend(self):
+        """ensure instance can be suspended"""
+        instance_id = self._create_instance()
+        self.compute.run_instance(self.context, instance_id)
+        self.compute.suspend_instance(self.context, instance_id)
+        self.compute.resume_instance(self.context, instance_id)
+        self.compute.terminate_instance(self.context, instance_id)
+
     def test_reboot(self):
         """Ensure instance can be rebooted"""
         instance_id = self._create_instance()
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 8dab8de2f..4aa489d08 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -33,6 +33,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
 
 class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
+        libvirt_conn._late_load_cheetah()
         self.flags(fake_call=True)
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('fake', 'fake', 'fake',
@@ -157,7 +158,6 @@ class LibvirtConnTestCase(test.TestCase):
             (lambda t: t.find('./devices/serial/source').get(
                 'path').split('/')[1], 'console.log'),
             (lambda t: t.find('./memory').text, '2097152')]
-
         if rescue:
             common_checks += [
                 (lambda t: t.findall('./devices/disk/source')[0].get(
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index b5d3ea395..ed2e4ffde 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -48,6 +48,7 @@ class XenAPIVolumeTestCase(test.TestCase):
         FLAGS.xenapi_connection_url = 'test_url'
         FLAGS.xenapi_connection_password = 'test_pass'
         fakes.stub_out_db_instance_api(self.stubs)
+        stubs.stub_out_get_target(self.stubs)
         fake.reset()
         self.values = {'name': 1, 'id': 1,
                        'project_id': 'fake',
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 1dacad6a3..a7e592fee 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -18,12 +18,13 @@
 
 from nova.virt import xenapi_conn
 from nova.virt.xenapi import fake
+from nova.virt.xenapi import volume_utils
 
 
 def stubout_session(stubs, cls):
-    """ Stubs out two methods from XenAPISession """
+    """Stubs out two methods from XenAPISession"""
     def fake_import(self):
-        """ Stubs out get_imported_xenapi of XenAPISession """
+        """Stubs out get_imported_xenapi of XenAPISession"""
         fake_module = 'nova.virt.xenapi.fake'
         from_list = ['fake']
         return __import__(fake_module, globals(), locals(), from_list, -1)
@@ -34,6 +35,14 @@ def stubout_session(stubs, cls):
               fake_import)
 
 
+def stub_out_get_target(stubs):
+    """Stubs out _get_target in volume_utils"""
+    def fake_get_target(volume_id):
+        return (None, None)
+
+    stubs.Set(volume_utils, '_get_target', fake_get_target)
+
+
 class FakeSessionForVMTests(fake.SessionBase):
     """ Stubs out a XenAPISession for VM tests """
     def __init__(self, uri):
diff --git a/nova/utils.py b/nova/utils.py
index b9045a50c..15112faa2 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -48,7 +48,8 @@ def import_class(import_str):
     try:
         __import__(mod_str)
         return getattr(sys.modules[mod_str], class_str)
-    except (ImportError, ValueError, AttributeError):
+    except (ImportError, ValueError, AttributeError), exc:
+        logging.debug(_('Inner Exception: %s'), exc)
         raise exception.NotFound(_('Class %s cannot be found') % class_str)
 
 
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 238acf798..706888b0d 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -148,6 +148,18 @@ class FakeConnection(object):
         """
         pass
 
+    def suspend(self, instance, callback):
+        """
+        suspend the specified instance
+        """
+        pass
+
+    def resume(self, instance, callback):
+        """
+        resume the specified instance
+        """
+        pass
+
     def destroy(self, instance):
         """
         Destroy (shutdown and delete) the specified instance.
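
A minimal sketch (illustrative names, not code from the diff) of the driver-side contract that nova/virt/fake.py now stubs out and that ComputeManager.suspend_instance()/resume_instance() rely on: each driver method receives the instance and a callback, and is expected to invoke that callback with the operation's result so the manager's _update_state_callback can refresh the instance power state.

class SketchDriver(object):
    """Illustrative driver stub; a real driver kicks off asynchronous work."""

    def suspend(self, instance, callback):
        # e.g. start an async hypervisor task, then report completion
        callback(None)

    def resume(self, instance, callback):
        callback(None)
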
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 651b2af93..f6a218fa4 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -58,10 +58,9 @@ from nova.compute import instance_types
 from nova.compute import power_state
 from nova.virt import images
 
-from Cheetah.Template import Template
-
 libvirt = None
 libxml2 = None
+Template = None
 
 FLAGS = flags.FLAGS
@@ -88,15 +87,26 @@ flags.DEFINE_bool('allow_project_net_traffic',
 def get_connection(read_only):
     # These are loaded late so that there's no need to install these
     # libraries when not using libvirt.
+    # Cheetah is separate because the unit tests want to load Cheetah,
+    # but not libvirt.
     global libvirt
     global libxml2
     if libvirt is None:
         libvirt = __import__('libvirt')
     if libxml2 is None:
         libxml2 = __import__('libxml2')
+    _late_load_cheetah()
     return LibvirtConnection(read_only)
 
 
+def _late_load_cheetah():
+    global Template
+    if Template is None:
+        t = __import__('Cheetah.Template', globals(), locals(), ['Template'],
+                       -1)
+        Template = t.Template
+
+
 def _get_net_and_mask(cidr):
     net = IPy.IP(cidr)
     return str(net.net()), str(net.netmask())
@@ -280,6 +290,14 @@ class LibvirtConnection(object):
         raise exception.APIError("unpause not supported for libvirt.")
 
     @exception.wrap_exception
+    def suspend(self, instance, callback):
+        raise exception.APIError("suspend not supported for libvirt")
+
+    @exception.wrap_exception
+    def resume(self, instance, callback):
+        raise exception.APIError("resume not supported for libvirt")
+
+    @exception.wrap_exception
     def rescue(self, instance):
         self.destroy(instance, False)
@@ -512,9 +530,10 @@ class LibvirtConnection(object):
 
         if FLAGS.allow_project_net_traffic:
             net, mask = _get_net_and_mask(network['cidr'])
-            extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n"
-                            "<parameter name=\"PROJMASK\" value=\"%s\" />\n"
-                           ) % (net, mask)
+            extra_params = ("<parameter name=\"PROJNET\" "
+                            "value=\"%s\" />\n"
+                            "<parameter name=\"PROJMASK\" "
+                            "value=\"%s\" />\n") % (net, mask)
         else:
             extra_params = "\n"
@@ -800,8 +819,8 @@ class NWFilterFirewall(object):
         the base filter are all in place.
         """
 
-        nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n"
-                       ) % instance['name']
+        nwfilter_xml = ("<filter name='nova-instance-%s' "
+                        "chain='root'>\n") % instance['name']
 
         if instance['image_id'] == FLAGS.vpn_image_id:
             nwfilter_xml += "  <filterref filter='nova-vpn' />\n"
@@ -814,8 +833,8 @@ class NWFilterFirewall(object):
         for security_group in instance.security_groups:
             self.ensure_security_group_filter(security_group['id'])
 
-            nwfilter_xml += ("  <filterref filter='nova-secgroup-%d' />\n"
-                            ) % security_group['id']
+            nwfilter_xml += ("  <filterref filter='nova-secgroup-%d' "
+                             "/>\n") % security_group['id']
         nwfilter_xml += "</filter>"
 
         self._define_filter(nwfilter_xml)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 7a6c9ee71..1eaf31c25 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -235,6 +235,7 @@ class SessionBase(object):
         elif '.' in name:
             impl = getattr(self, name.replace('.', '_'))
             if impl is not None:
+
                 def callit(*params):
                     logging.warn('Calling %s %s', name, impl)
                     self._check_session(params)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 89e02c917..47fb6db53 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -39,19 +39,35 @@ XENAPI_POWER_STATE = {
     'Halted': power_state.SHUTDOWN,
     'Running': power_state.RUNNING,
     'Paused': power_state.PAUSED,
-    'Suspended': power_state.SHUTDOWN,  # FIXME
+    'Suspended': power_state.SUSPENDED,
    'Crashed': power_state.CRASHED}
 
 
+class ImageType:
+    """
+    Enumeration class for distinguishing different image types
+        0 - kernel/ramdisk image (goes on dom0's filesystem)
+        1 - disk image (local SR, partitioned by objectstore plugin)
+        2 - raw disk image (local SR, NOT partitioned by plugin)
+    """
+
+    KERNEL_RAMDISK = 0
+    DISK = 1
+    DISK_RAW = 2
+
+
 class VMHelper(HelperBase):
     """
     The class that wraps the helper methods together.
     """
 
     @classmethod
-    def create_vm(cls, session, instance, kernel, ramdisk):
+    def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
         """Create a VM record.  Returns a Deferred that gives the new
-        VM reference."""
+        VM reference.
+        the pv_kernel flag indicates whether the guest is HVM or PV
+        """
+
         instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
         mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
         vcpus = str(instance_type['vcpus'])
@@ -70,9 +86,9 @@ class VMHelper(HelperBase):
             'actions_after_reboot': 'restart',
             'actions_after_crash': 'destroy',
             'PV_bootloader': '',
-            'PV_kernel': kernel,
-            'PV_ramdisk': ramdisk,
-            'PV_args': 'root=/dev/xvda1',
+            'PV_kernel': '',
+            'PV_ramdisk': '',
+            'PV_args': '',
             'PV_bootloader_args': '',
             'PV_legacy_args': '',
             'HVM_boot_policy': '',
@@ -84,7 +100,25 @@ class VMHelper(HelperBase):
             'user_version': '0',
             'other_config': {},
             }
-        logging.debug(_('Created VM %s...'), instance.name)
+        #Complete VM configuration record according to the image type
+        #non-raw/raw with PV kernel/raw in HVM mode
+        if instance.kernel_id:
+            rec['PV_bootloader'] = ''
+            rec['PV_kernel'] = kernel
+            rec['PV_ramdisk'] = ramdisk
+            rec['PV_args'] = 'root=/dev/xvda1'
+            rec['PV_bootloader_args'] = ''
+            rec['PV_legacy_args'] = ''
+        else:
+            if pv_kernel:
+                rec['PV_args'] = 'noninteractive'
+                rec['PV_bootloader'] = 'pygrub'
+            else:
+                rec['HVM_boot_policy'] = 'BIOS order'
+                rec['HVM_boot_params'] = {'order': 'dc'}
+                rec['platform'] = {'acpi': 'true', 'apic': 'true',
+                                   'pae': 'true', 'viridian': 'true'}
+        logging.debug('Created VM %s...', instance.name)
         vm_ref = session.call_xenapi('VM.create', rec)
         logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
         return vm_ref
@@ -170,22 +204,24 @@ class VMHelper(HelperBase):
         return vif_ref
 
     @classmethod
-    def fetch_image(cls, session, image, user, project, use_sr):
-        """use_sr: True to put the image as a VDI in an SR, False to place
-        it on dom0's filesystem.  The former is for VM disks, the latter for
-        its kernel and ramdisk (if external kernels are being used).
-        Returns a Deferred that gives the new VDI UUID."""
-
+    def fetch_image(cls, session, image, user, project, type):
+        """
+        type is interpreted as an ImageType instance
+        """
         url = images.image_url(image)
         access = AuthManager().get_access_key(user, project)
-        logging.debug(_("Asking xapi to fetch %s as %s"), url, access)
-        fn = use_sr and 'get_vdi' or 'get_kernel'
+        logging.debug("Asking xapi to fetch %s as %s", url, access)
+        fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
         args = {}
         args['src_url'] = url
         args['username'] = access
         args['password'] = user.secret
-        if use_sr:
+        args['add_partition'] = 'false'
+        args['raw'] = 'false'
+        if type != ImageType.KERNEL_RAMDISK:
             args['add_partition'] = 'true'
+        if type == ImageType.DISK_RAW:
+            args['raw'] = 'true'
         task = session.async_call_plugin('objectstore', fn, args)
         #FIXME(armando): find a solution to missing instance_id
         #with Josh Kearney
         uuid = session.wait_for_task(0, task)
         return uuid
 
     @classmethod
+    def lookup_image(cls, session, vdi_ref):
+        logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
+        fn = "is_vdi_pv"
+        args = {}
+        args['vdi-ref'] = vdi_ref
+        #TODO: Call proper function in plugin
+        task = session.async_call_plugin('objectstore', fn, args)
+        pv_str = session.wait_for_task(task)
+        if pv_str.lower() == 'true':
+            pv = True
+        elif pv_str.lower() == 'false':
+            pv = False
+        logging.debug("PV Kernel in VDI:%d", pv)
+        return pv
+
+    @classmethod
     def lookup(cls, session, i):
         """Look the instance i up, and returns it if available"""
         vms = session.get_xenapi().VM.get_by_name_label(i)
@@ -231,6 +283,10 @@ class VMHelper(HelperBase):
     @classmethod
     def compile_info(cls, record):
         """Fill record with VM status information"""
+        logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
+                     record['power_state'])
+        logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
+                     XENAPI_POWER_STATE[record['power_state']])
         return {'state': XENAPI_POWER_STATE[record['power_state']],
                 'max_mem': long(record['memory_static_max']) >> 10,
                 'mem': long(record['memory_dynamic_max']) >> 10,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 39bb0f1e8..ba502ffa2 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -29,6 +29,7 @@ from nova.auth.manager import AuthManager
 from nova.compute import power_state
 from nova.virt.xenapi.network_utils import NetworkHelper
 from nova.virt.xenapi.vm_utils import VMHelper
+from nova.virt.xenapi.vm_utils import ImageType
 
 
 class VMOps(object):
@@ -64,16 +65,30 @@ class VMOps(object):
         user = AuthManager().get_user(instance.user_id)
         project = AuthManager().get_project(instance.project_id)
-        vdi_uuid = VMHelper.fetch_image(
-            self._session, instance.image_id, user, project, True)
-        kernel = VMHelper.fetch_image(
-            self._session, instance.kernel_id, user, project, False)
-        ramdisk = VMHelper.fetch_image(
-            self._session, instance.ramdisk_id, user, project, False)
+        #if kernel is not present we must download a raw disk
+        if instance.kernel_id:
+            disk_image_type = ImageType.DISK
+        else:
+            disk_image_type = ImageType.DISK_RAW
+        vdi_uuid = VMHelper.fetch_image(self._session,
+                instance.image_id, user, project, disk_image_type)
         vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
-        vm_ref = VMHelper.create_vm(
-            self._session, instance, kernel, ramdisk)
+        #Have a look at the VDI and see if it has a PV kernel
+        pv_kernel = False
+        if not instance.kernel_id:
+            pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
+        kernel = None
+        if instance.kernel_id:
+            kernel = VMHelper.fetch_image(self._session,
+                instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+        ramdisk = None
+        if instance.ramdisk_id:
+            ramdisk = VMHelper.fetch_image(self._session,
+                instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+        vm_ref = VMHelper.create_vm(self._session,
+                                    instance, kernel, ramdisk, pv_kernel)
         VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
+
         if network_ref:
             VMHelper.create_vif(self._session, vm_ref,
                                 network_ref, instance.mac_address)
@@ -173,6 +188,26 @@ class VMOps(object):
         task = self._session.call_xenapi('Async.VM.unpause', vm)
         self._wait_with_callback(instance.id, task, callback)
 
+    def suspend(self, instance, callback):
+        """suspend the specified instance"""
+        instance_name = instance.name
+        vm = VMHelper.lookup(self._session, instance_name)
+        if vm is None:
+            raise Exception(_("suspend: instance not present %s") %
+                            instance_name)
+        task = self._session.call_xenapi('Async.VM.suspend', vm)
+        self._wait_with_callback(task, callback)
+
+    def resume(self, instance, callback):
+        """resume the specified instance"""
+        instance_name = instance.name
+        vm = VMHelper.lookup(self._session, instance_name)
+        if vm is None:
+            raise Exception(_("resume: instance not present %s") %
+                            instance_name)
+        task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
+        self._wait_with_callback(task, callback)
+
     def get_info(self, instance_id):
         """Return data about VM instance"""
         vm = VMHelper.lookup(self._session, instance_id)
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index a0c0a67d4..1ca813bcf 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -60,13 +60,11 @@ class VolumeHelper(HelperBase):
                       'port': info['targetPort'],
                       'targetIQN': info['targetIQN'],
                       'chapuser': info['chapuser'],
-                      'chappassword': info['chappassword']
-                      }
+                      'chappassword': info['chappassword']}
         else:
             record = {'target': info['targetHost'],
                       'port': info['targetPort'],
-                      'targetIQN': info['targetIQN']
-                      }
+                      'targetIQN': info['targetIQN']}
         try:
             sr_ref = session.get_xenapi().SR.create(
                 session.get_xenapi_host(),
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 11c66c974..7f03d6c2b 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -147,6 +147,14 @@ class XenAPIConnection(object):
         """Unpause paused VM instance"""
         self._vmops.unpause(instance, callback)
 
+    def suspend(self, instance, callback):
+        """suspend the specified instance"""
+        self._vmops.suspend(instance, callback)
+
+    def resume(self, instance, callback):
+        """resume the specified instance"""
+        self._vmops.resume(instance, callback)
+
     def get_info(self, instance_id):
         """Return data about VM instance"""
         return self._vmops.get_info(instance_id)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index c7ee9ed14..b5d6b96c1 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -270,7 +270,7 @@ class Serializer(object):
         needed to serialize a dictionary to that type.
         """
         self.metadata = metadata or {}
-        req = webob.Request(environ)
+        req = webob.Request.blank('', environ)
         suffix = req.path_info.split('.')[-1].lower()
         if suffix == 'json':
             self.handler = self._to_json
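
To summarize the vm_utils/vmops changes above, here is an illustrative sketch (standalone names assumed, not code from the diff) of how the ImageType value chosen in VMOps.spawn() — DISK when the instance has a kernel_id, DISK_RAW otherwise — translates into the objectstore plugin arguments built in VMHelper.fetch_image():

KERNEL_RAMDISK, DISK, DISK_RAW = 0, 1, 2    # mirrors vm_utils.ImageType


def plugin_args_for(image_type):
    """Return the objectstore plugin args fetch_image would send."""
    args = {'add_partition': 'false', 'raw': 'false'}
    if image_type != KERNEL_RAMDISK:
        args['add_partition'] = 'true'      # disk images land in an SR
    if image_type == DISK_RAW:
        args['raw'] = 'true'                # raw images skip partitioning
    return args


assert plugin_args_for(DISK_RAW) == {'add_partition': 'true', 'raw': 'true'}
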
