| author | Soren Hansen <soren@linux2go.dk> | 2011-03-09 23:39:12 +0100 |
|---|---|---|
| committer | Soren Hansen <soren@linux2go.dk> | 2011-03-09 23:39:12 +0100 |
| commit | c887b043ebc65789b0d8471d676ea7725263c4f3 | |
| tree | 5ebc1da745e763db5b3cb6aa07dcf84fe486af40 | |
| parent | 4e9c570fbf8b3987d556da085b61f159f32c16f1 | |
| parent | 97983b7725a65482627aa32db1ea0e67ca7b3584 | |
Merge trunk
65 files changed, 1873 insertions, 550 deletions
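A pattern worth noting before the diff itself: nearly every API handler in this merge swaps the bare `@webob.dec.wsgify` decorator for `@webob.dec.wsgify(RequestClass=wsgi.Request)`, so handlers receive Nova's `Request` subclass and can negotiate a response content type via `best_match_content_type()` instead of serializing from the raw WSGI environ; in parallel, shell-string calls to `utils.execute()` are split into argument lists, avoiding shell interpolation of untrusted values. Below is a minimal sketch of the decorator pattern only; the `Request` class is a simplified stand-in for `nova.wsgi.Request` (assumed behavior, not the actual implementation), while `webob.dec.wsgify(RequestClass=...)` is genuine webob API.

```python
# Sketch: the RequestClass pattern adopted across this merge.
# `Request` is a simplified stand-in for nova.wsgi.Request.
import webob
import webob.dec


class Request(webob.Request):
    """webob.Request plus naive content-type negotiation."""

    def best_match_content_type(self):
        # Pick a supported media type from the Accept header; default to JSON.
        offers = ('application/json', 'application/xml')
        return self.accept.best_match(offers) or 'application/json'


class Versions(object):
    # RequestClass=Request makes webob wrap the WSGI environ in our subclass,
    # so __call__ can call best_match_content_type() directly on `req`.
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        return webob.Response(body=b'{"versions": []}',
                              content_type=req.best_match_content_type())
```

Wrapped this way, `Versions()` is an ordinary WSGI application: `Request.blank('/', accept='application/json').get_response(Versions())` exercises the negotiation end to end.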
@@ -15,6 +15,7 @@ <corywright@gmail.com> <cory.wright@rackspace.com> <devin.carlen@gmail.com> <devcamcar@illian.local> <ewan.mellor@citrix.com> <emellor@silver> +<itoumsn@nttdata.co.jp> <itoumsn@shayol> <jaypipes@gmail.com> <jpipes@serialcoder> <jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local> <jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com> @@ -39,6 +39,7 @@ Ken Pepple <ken.pepple@gmail.com> Kevin L. Mitchell <kevin.mitchell@rackspace.com> Koji Iida <iida.koji@lab.ntt.co.jp> Lorin Hochstein <lorin@isi.edu> +Masanori Itoh <itoumsn@nttdata.co.jp> Matt Dietz <matt.dietz@rackspace.com> Michael Gundlach <michael.gundlach@rackspace.com> Monsyne Dragon <mdragon@rackspace.com> diff --git a/bin/nova-api b/bin/nova-api index 14be4b841..06bb855cb 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -36,49 +36,15 @@ gettext.install('nova', unicode=1) from nova import flags from nova import log as logging +from nova import service from nova import utils from nova import version from nova import wsgi + LOG = logging.getLogger('nova.api') FLAGS = flags.FLAGS -flags.DEFINE_string('ec2_listen', "0.0.0.0", - 'IP address for EC2 API to listen') -flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') -flags.DEFINE_string('osapi_listen', "0.0.0.0", - 'IP address for OpenStack API to listen') -flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') -flags.DEFINE_flag(flags.HelpFlag()) -flags.DEFINE_flag(flags.HelpshortFlag()) -flags.DEFINE_flag(flags.HelpXMLFlag()) - -API_ENDPOINTS = ['ec2', 'osapi'] - - -def run_app(paste_config_file): - LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file) - apps = [] - for api in API_ENDPOINTS: - config = wsgi.load_paste_configuration(paste_config_file, api) - if config is None: - LOG.debug(_("No paste configuration for app: %s"), api) - continue - LOG.debug(_("App Config: %(api)s\n%(config)r") % locals()) - LOG.info(_("Running %s API"), api) - app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_listen_port" % api), - getattr(FLAGS, "%s_listen" % api))) - if len(apps) == 0: - LOG.error(_("No known API applications configured in %s."), - paste_config_file) - return - - server = wsgi.Server() - for app in apps: - server.start(*app) - server.wait() - if __name__ == '__main__': utils.default_flagfile() @@ -90,8 +56,6 @@ if __name__ == '__main__': for flag in FLAGS: flag_get = FLAGS.get(flag, None) LOG.debug("%(flag)s : %(flag_get)s" % locals()) - conf = wsgi.paste_config_file('nova-api.conf') - if conf: - run_app(conf) - else: - LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf') + + service = service.serve_wsgi(service.ApiService) + service.wait() diff --git a/bin/nova-manage b/bin/nova-manage index 9bf3a1bb3..7dfc3c045 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -545,6 +545,15 @@ class NetworkCommands(object): network.dhcp_start, network.dns) + def delete(self, fixed_range): + """Deletes a network""" + network = db.network_get_by_cidr(context.get_admin_context(), \ + fixed_range) + if network.project_id is not None: + raise ValueError(_('Network must be disassociated from project %s' + ' before delete' % network.project_id)) + db.network_delete_safe(context.get_admin_context(), network.id) + class ServiceCommands(object): """Enable and disable running services""" diff --git a/etc/nova-api.conf b/etc/api-paste.ini index 9f7e93d4c..9f7e93d4c 100644 --- a/etc/nova-api.conf +++ b/etc/api-paste.ini diff --git a/nova/api/direct.py 
b/nova/api/direct.py index 208b6d086..dfca250e0 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -187,7 +187,7 @@ class ServiceWrapper(wsgi.Controller): def __init__(self, service_handle): self.service_handle = service_handle - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] @@ -206,7 +206,7 @@ class ServiceWrapper(wsgi.Controller): params = dict([(str(k), v) for (k, v) in params.iteritems()]) result = method(context, **params) if type(result) is dict or type(result) is list: - return self._serialize(result, req) + return self._serialize(result, req.best_match_content_type()) else: return result @@ -218,7 +218,7 @@ class Proxy(object): self.prefix = prefix def __do_request(self, path, context, **kwargs): - req = webob.Request.blank(path) + req = wsgi.Request.blank(path) req.method = 'POST' req.body = urllib.urlencode({'json': utils.dumps(kwargs)}) req.environ['openstack.context'] = context diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 5adc2c075..fccebca5d 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -53,7 +53,7 @@ flags.DEFINE_list('lockout_memcached_servers', None, class RequestLogging(wsgi.Middleware): """Access-Log akin logging for all EC2 API requests.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): start = utils.utcnow() rv = req.get_response(self.application) @@ -112,7 +112,7 @@ class Lockout(wsgi.Middleware): debug=0) super(Lockout, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): access_key = str(req.params['AWSAccessKeyId']) failures_key = "authfailures-%s" % access_key @@ -141,7 +141,7 @@ class Authenticate(wsgi.Middleware): """Authenticate an EC2 request and add 'ec2.context' to WSGI environ.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): # Read request signature and access id. try: @@ -190,7 +190,7 @@ class Requestify(wsgi.Middleware): super(Requestify, self).__init__(app) self.controller = utils.import_class(controller)() - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Version', 'Timestamp'] @@ -198,6 +198,12 @@ class Requestify(wsgi.Middleware): try: # Raise KeyError if omitted action = req.params['Action'] + # Fix bug lp:720157 for older (version 1) clients + version = req.params['SignatureVersion'] + if int(version) == 1: + non_args.remove('SignatureMethod') + if 'SignatureMethod' in args: + args.pop('SignatureMethod') for non_arg in non_args: # Remove, but raise KeyError if omitted args.pop(non_arg) @@ -269,7 +275,7 @@ class Authorizer(wsgi.Middleware): }, } - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): context = req.environ['ec2.context'] controller = req.environ['ec2.request'].controller.__class__.__name__ @@ -303,7 +309,7 @@ class Executor(wsgi.Application): response, or a 400 upon failure. 
""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): context = req.environ['ec2.context'] api_request = req.environ['ec2.request'] @@ -365,7 +371,7 @@ class Executor(wsgi.Application): class Versions(wsgi.Application): - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Respond to a request for all EC2 versions.""" # available api versions diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 2b1acba5a..d7ad08d2f 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -52,7 +52,23 @@ def _database_to_isoformat(datetimeobj): def _try_convert(value): - """Return a non-string if possible""" + """Return a non-string from a string or unicode, if possible. + + ============= ===================================================== + When value is returns + ============= ===================================================== + zero-length '' + 'None' None + 'True' True + 'False' False + '0', '-0' 0 + 0xN, -0xN int from hex (postitive) (N is any number) + 0bN, -0bN int from binary (positive) (N is any number) + * try conversion to int, float, complex, fallback value + + """ + if len(value) == 0: + return '' if value == 'None': return None if value == 'True': diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 844ccbe5e..b7d72d1c1 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -115,7 +115,7 @@ class CloudController(object): start = os.getcwd() os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead - utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh") + utils.runthis(_("Generating root CA: %s"), "sh", "genrootca.sh") os.chdir(start) def _get_mpi_data(self, context, project_id): @@ -298,7 +298,7 @@ class CloudController(object): 'keyFingerprint': key_pair['fingerprint'], }) - return {'keypairsSet': result} + return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) @@ -838,14 +838,14 @@ class CloudController(object): self.compute_api.unrescue(context, instance_id=instance_id) return True - def update_instance(self, context, ec2_id, **kwargs): + def update_instance(self, context, instance_id, **kwargs): updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: if field in kwargs: changes[field] = kwargs[field] if changes: - instance_id = ec2_id_to_id(ec2_id) + instance_id = ec2_id_to_id(instance_id) self.compute_api.update(context, instance_id=instance_id, **kwargs) return True diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 6fb441656..28f99b0ef 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -65,7 +65,7 @@ class MetadataRequestHandler(wsgi.Application): data = data[item] return data - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): cc = cloud.CloudController() remote_address = req.remote_addr diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 274330e3b..197fcc619 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -47,7 +47,7 @@ flags.DEFINE_bool('allow_admin_api', class FaultWrapper(wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return 
req.get_response(self.application) @@ -115,7 +115,7 @@ class APIRouter(wsgi.Router): class Versions(wsgi.Application): - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Respond to a request for all OpenStack API versions.""" response = { @@ -124,4 +124,6 @@ class Versions(wsgi.Application): metadata = { "application/xml": { "attributes": dict(version=["status", "id"])}} - return wsgi.Serializer(req.environ, metadata).to_content_type(response) + + content_type = req.best_match_content_type() + return wsgi.Serializer(metadata).serialize(response, content_type) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6011e6115..de8905f46 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -46,7 +46,7 @@ class AuthMiddleware(wsgi.Middleware): self.auth = auth.manager.AuthManager() super(AuthMiddleware, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if not self.has_authentication(req): return self.authenticate(req) @@ -121,7 +121,7 @@ class AuthMiddleware(wsgi.Middleware): username - string key - string API key - req - webob.Request object + req - wsgi.Request object """ ctxt = context.get_admin_context() user = self.auth.get_user_from_access_key(key) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 9f85c5c8a..74ac21024 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -25,7 +25,7 @@ def limited(items, request, max_limit=1000): Return a slice of items according to requested offset and limit. @param items: A sliceable entity - @param request: `webob.Request` possibly containing 'offset' and 'limit' + @param request: `wsgi.Request` possibly containing 'offset' and 'limit' GET variables. 'offset' is where to start in the list, and 'limit' is the maximum number of items to return. 
If 'limit' is not specified, 0, or > max_limit, we default @@ -36,15 +36,18 @@ def limited(items, request, max_limit=1000): try: offset = int(request.GET.get('offset', 0)) except ValueError: - offset = 0 + raise webob.exc.HTTPBadRequest(_('offset param must be an integer')) try: limit = int(request.GET.get('limit', max_limit)) except ValueError: - limit = max_limit + raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) - if offset < 0 or limit < 0: - raise webob.exc.HTTPBadRequest() + if limit < 0: + raise webob.exc.HTTPBadRequest(_('limit param must be positive')) + + if offset < 0: + raise webob.exc.HTTPBadRequest(_('offset param must be positive')) limit = min(max_limit, limit or max_limit) range_end = offset + limit diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 9ebdbe710..8c291c2eb 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -65,7 +65,7 @@ class Controller(wsgi.Controller): def create(self, req, server_id): """Creates a new console""" - #info = self._deserialize(req.body, req) + #info = self._deserialize(req.body, req.get_content_type()) self.console_api.create_console( req.environ['nova.context'], int(server_id)) diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 224a7ef0b..2fd733299 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -42,7 +42,7 @@ class Fault(webob.exc.HTTPException): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. @@ -57,6 +57,7 @@ class Fault(webob.exc.HTTPException): fault_data[fault_name]['retryAfter'] = retry # 'code' is an attribute on the fault tag itself metadata = {'application/xml': {'attributes': {fault_name: 'code'}}} - serializer = wsgi.Serializer(req.environ, metadata) - self.wrapped_exc.body = serializer.to_content_type(fault_data) + serializer = wsgi.Serializer(metadata) + content_type = req.best_match_content_type() + self.wrapped_exc.body = serializer.serialize(fault_data, content_type) return self.wrapped_exc diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index cf85a496f..98f0dd96b 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -151,7 +151,7 @@ class Controller(wsgi.Controller): def create(self, req): context = req.environ['nova.context'] - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) instance_id = env["image"]["serverId"] name = env["image"]["name"] diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index cbb4b897e..88ffc3246 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -57,7 +57,7 @@ class RateLimitingMiddleware(wsgi.Middleware): self.limiter = WSGIAppProxy(service_host) super(RateLimitingMiddleware, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Rate limit the request. 
@@ -183,7 +183,7 @@ class WSGIApp(object): """Create the WSGI application using the given Limiter instance.""" self.limiter = limiter - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): parts = req.path_info.split('/') # format: /limiter/<username>/<urlencoded action> diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 08b95b46a..dc28a0782 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -98,7 +98,7 @@ class Controller(wsgi.Controller): 'application/xml': { "attributes": { "server": ["id", "imageId", "name", "flavorId", "hostId", - "status", "progress"]}}} + "status", "progress", "adminPass"]}}} def __init__(self): self.compute_api = compute.API() @@ -141,7 +141,7 @@ class Controller(wsgi.Controller): def create(self, req): """ Creates a new server for a given user """ - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) if not env: return faults.Fault(exc.HTTPUnprocessableEntity()) @@ -178,11 +178,21 @@ class Controller(wsgi.Controller): key_data=key_pair['public_key'], metadata=metadata, onset_files=env.get('onset_files', [])) - return _translate_keys(instances[0]) + + server = _translate_keys(instances[0]) + password = "%s%s" % (server['server']['name'][:4], + utils.generate_password(12)) + server['server']['adminPass'] = password + self.compute_api.set_admin_password(context, server['server']['id'], + password) + return server def update(self, req, id): """ Updates the server name or password """ - inst_dict = self._deserialize(req.body, req) + if len(req.body) == 0: + raise exc.HTTPUnprocessableEntity() + + inst_dict = self._deserialize(req.body, req.get_content_type()) if not inst_dict: return faults.Fault(exc.HTTPUnprocessableEntity()) @@ -203,10 +213,58 @@ class Controller(wsgi.Controller): return exc.HTTPNoContent() def action(self, req, id): - """ Multi-purpose method used to reboot, rebuild, and - resize a server """ - input_dict = self._deserialize(req.body, req) - #TODO(sandy): rebuild/resize not supported. 
+ """Multi-purpose method used to reboot, rebuild, or + resize a server""" + + actions = { + 'reboot': self._action_reboot, + 'resize': self._action_resize, + 'confirmResize': self._action_confirm_resize, + 'revertResize': self._action_revert_resize, + 'rebuild': self._action_rebuild, + } + + input_dict = self._deserialize(req.body, req.get_content_type()) + for key in actions.keys(): + if key in input_dict: + return actions[key](input_dict, req, id) + return faults.Fault(exc.HTTPNotImplemented()) + + def _action_confirm_resize(self, input_dict, req, id): + try: + self.compute_api.confirm_resize(req.environ['nova.context'], id) + except Exception, e: + LOG.exception(_("Error in confirm-resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPNoContent() + + def _action_revert_resize(self, input_dict, req, id): + try: + self.compute_api.revert_resize(req.environ['nova.context'], id) + except Exception, e: + LOG.exception(_("Error in revert-resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + + def _action_rebuild(self, input_dict, req, id): + return faults.Fault(exc.HTTPNotImplemented()) + + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing arguments for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return faults.Fault(exc.HTTPAccepted()) + + def _action_reboot(self, input_dict, req, id): try: reboot_type = input_dict['reboot']['type'] except Exception: @@ -402,7 +460,7 @@ class Controller(wsgi.Controller): _("Cannot build from image %(image_id)s, status not active") % locals()) - if image['type'] != 'machine': + if image['disk_format'] != 'ami': return None, None try: diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index d5206da20..cf6cd789f 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -67,13 +67,13 @@ class Controller(wsgi.Controller): def create(self, req): context = req.environ['nova.context'] - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) zone = db.zone_create(context, env["zone"]) return dict(zone=_scrub_zone(zone)) def update(self, req, id): context = req.environ['nova.context'] - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) zone_id = int(id) zone = db.zone_update(context, zone_id, env["zone"]) return dict(zone=_scrub_zone(zone)) diff --git a/nova/compute/api.py b/nova/compute/api.py index 35a7d7bc0..a0bb2cf04 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -319,12 +319,12 @@ class API(base.Base): try: instance = self.get(context, instance_id) except exception.NotFound: - LOG.warning(_("Instance %d was not found during terminate"), + LOG.warning(_("Instance %s was not found during terminate"), instance_id) raise if (instance['state_description'] == 'terminating'): - LOG.warning(_("Instance %d is already being terminated"), + LOG.warning(_("Instance %s is already being terminated"), instance_id) return @@ -404,6 +404,10 @@ class API(base.Base): kwargs = {'method': method, 'args': params} return rpc.call(context, queue, kwargs) + def 
_cast_scheduler_message(self, context, args): + """Generic handler for RPC calls to the scheduler""" + rpc.cast(context, FLAGS.scheduler_topic, args) + def snapshot(self, context, instance_id, name): """Snapshot the given instance. @@ -420,6 +424,45 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) + def revert_resize(self, context, instance_id): + """Reverts a resize, deleting the 'new' instance in the process""" + context = context.elevated() + migration_ref = self.db.migration_get_by_instance_and_status(context, + instance_id, 'finished') + if not migration_ref: + raise exception.NotFound(_("No finished migrations found for " + "instance")) + + params = {'migration_id': migration_ref['id']} + self._cast_compute_message('revert_resize', context, instance_id, + migration_ref['dest_compute'], params=params) + + def confirm_resize(self, context, instance_id): + """Confirms a migration/resize, deleting the 'old' instance in the + process.""" + context = context.elevated() + migration_ref = self.db.migration_get_by_instance_and_status(context, + instance_id, 'finished') + if not migration_ref: + raise exception.NotFound(_("No finished migrations found for " + "instance")) + instance_ref = self.db.instance_get(context, instance_id) + params = {'migration_id': migration_ref['id']} + self._cast_compute_message('confirm_resize', context, instance_id, + migration_ref['source_compute'], params=params) + + self.db.migration_update(context, migration_ref['id'], + {'status': 'confirmed'}) + self.db.instance_update(context, instance_id, + {'host': migration_ref['dest_compute'], }) + + def resize(self, context, instance_id, flavor): + """Resize a running instance.""" + self._cast_scheduler_message(context, + {"method": "prep_resize", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id, }},) + def pause(self, context, instance_id): """Pause the given instance.""" self._cast_compute_message('pause_instance', context, instance_id) @@ -455,9 +498,10 @@ class API(base.Base): """Unrescue the given instance.""" self._cast_compute_message('unrescue_instance', context, instance_id) - def set_admin_password(self, context, instance_id): """Set the root/admin password for the given instance.""" - self._cast_compute_message('set_admin_password', context, instance_id) + def set_admin_password(self, context, instance_id, password=None): + self._cast_compute_message('set_admin_password', context, instance_id, + password) def inject_file(self, context, instance_id): """Write a file to the given instance.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3af97683f..b35216dd3 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -413,6 +413,110 @@ class ComputeManager(manager.Manager): @exception.wrap_exception @checks_instance_lock + def confirm_resize(self, context, instance_id, migration_id): + """Destroys the source instance""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + migration_ref = self.db.migration_get(context, migration_id) + self.driver.destroy(instance_ref) + + @exception.wrap_exception + @checks_instance_lock + def revert_resize(self, context, instance_id, migration_id): + """Destroys the new instance on the destination machine, + reverts the model changes, and powers on the old + instance on the source machine""" + instance_ref = self.db.instance_get(context, instance_id) + migration_ref = self.db.migration_get(context, migration_id) + 
+ #TODO(mdietz): we may want to split these into separate methods. + if migration_ref['source_compute'] == FLAGS.host: + self.driver._start(instance_ref) + self.db.migration_update(context, migration_id, + {'status': 'reverted'}) + else: + self.driver.destroy(instance_ref) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + instance_ref['host']) + rpc.cast(context, topic, + {'method': 'revert_resize', + 'args': { + 'migration_id': migration_ref['id'], + 'instance_id': instance_id, }, + }) + + @exception.wrap_exception + @checks_instance_lock + def prep_resize(self, context, instance_id): + """Initiates the process of moving a running instance to another + host, possibly changing the RAM and disk size in the process""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + if instance_ref['host'] == FLAGS.host: + raise exception.Error(_( + 'Migration error: destination same as source!')) + + migration_ref = self.db.migration_create(context, + {'instance_id': instance_id, + 'source_compute': instance_ref['host'], + 'dest_compute': FLAGS.host, + 'dest_host': self.driver.get_host_ip_addr(), + 'status': 'pre-migrating'}) + LOG.audit(_('instance %s: migrating to '), instance_id, + context=context) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + instance_ref['host']) + rpc.cast(context, topic, + {'method': 'resize_instance', + 'args': { + 'migration_id': migration_ref['id'], + 'instance_id': instance_id, }, + }) + + @exception.wrap_exception + @checks_instance_lock + def resize_instance(self, context, instance_id, migration_id): + """Starts the migration of a running instance to another host""" + migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get(context, instance_id) + self.db.migration_update(context, migration_id, + {'status': 'migrating', }) + + disk_info = self.driver.migrate_disk_and_power_off(instance_ref, + migration_ref['dest_host']) + self.db.migration_update(context, migration_id, + {'status': 'post-migrating', }) + + #TODO(mdietz): This is where we would update the VM record + #after resizing + service = self.db.service_get_by_host_and_topic(context, + migration_ref['dest_compute'], FLAGS.compute_topic) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + migration_ref['dest_compute']) + rpc.cast(context, topic, + {'method': 'finish_resize', + 'args': { + 'migration_id': migration_id, + 'instance_id': instance_id, + 'disk_info': disk_info, }, + }) + + @exception.wrap_exception + @checks_instance_lock + def finish_resize(self, context, instance_id, migration_id, disk_info): + """Completes the migration process by setting up the newly transferred + disk and turning on the instance on its new host machine""" + migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get(context, + migration_ref['instance_id']) + + self.driver.finish_resize(instance_ref, disk_info) + + self.db.migration_update(context, migration_id, + {'status': 'finished', }) + + @exception.wrap_exception + @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this server.""" context = context.elevated() diff --git a/nova/console/xvp.py b/nova/console/xvp.py index cd257e0a6..68d8c8565 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -133,10 +133,10 @@ class XVPConsoleProxy(object): return logging.debug(_("Starting xvp")) try: - utils.execute('xvp -p %s -c %s -l %s' % - (FLAGS.console_xvp_pid, - 
FLAGS.console_xvp_conf, - FLAGS.console_xvp_log)) + utils.execute('xvp', + '-p', FLAGS.console_xvp_pid, + '-c', FLAGS.console_xvp_conf, + '-l', FLAGS.console_xvp_log) except exception.ProcessExecutionError, err: logging.error(_("Error starting xvp: %s") % err) @@ -190,5 +190,5 @@ class XVPConsoleProxy(object): flag = '-x' #xvp will blow up on passwords that are too long (mdragon) password = password[:maxlen] - out, err = utils.execute('xvp %s' % flag, process_input=password) + out, err = utils.execute('xvp', flag, process_input=password) return out.strip() diff --git a/nova/crypto.py b/nova/crypto.py index a34b940f5..2a8d4abca 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -105,8 +105,10 @@ def generate_key_pair(bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.join(tmpdir, 'temp') - utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile)) - (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile)) + utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '', + '-f', keyfile) + (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', + '%s.pub' % (keyfile)) fingerprint = out.split(' ')[1] private_key = open(keyfile).read() public_key = open(keyfile + '.pub').read() @@ -118,7 +120,8 @@ def generate_key_pair(bits=1024): # bio = M2Crypto.BIO.MemoryBuffer() # key.save_pub_key_bio(bio) # public_key = bio.read() - # public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key) + # public_key, err = execute('ssh-keygen', '-y', '-f', + # '/dev/stdin', private_key) return (private_key, public_key, fingerprint) @@ -143,9 +146,10 @@ def revoke_cert(project_id, file_name): start = os.getcwd() os.chdir(ca_folder(project_id)) # NOTE(vish): potential race condition here - utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name) - utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" % - FLAGS.crl_file) + utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke', + file_name) + utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf', + '-out', FLAGS.crl_file) os.chdir(start) @@ -193,9 +197,9 @@ def generate_x509_cert(user_id, project_id, bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.join(tmpdir, 'temp.csr') - utils.execute("openssl genrsa -out %s %s" % (keyfile, bits)) - utils.execute("openssl req -new -key %s -out %s -batch -subj %s" % - (keyfile, csrfile, subject)) + utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits)) + utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile, + '-batch', '-subj', subject) private_key = open(keyfile).read() csr = open(csrfile).read() shutil.rmtree(tmpdir) @@ -212,8 +216,8 @@ def _ensure_project_folder(project_id): if not os.path.exists(ca_path(project_id)): start = os.getcwd() os.chdir(ca_folder()) - utils.execute("sh geninter.sh %s %s" % - (project_id, _project_cert_subject(project_id))) + utils.execute('sh', 'geninter.sh', project_id, + _project_cert_subject(project_id)) os.chdir(start) @@ -228,8 +232,8 @@ def generate_vpn_files(project_id): start = os.getcwd() os.chdir(ca_folder()) # TODO(vish): the shell scripts could all be done in python - utils.execute("sh genvpn.sh %s %s" % - (project_id, _vpn_cert_subject(project_id))) + utils.execute('sh', 'genvpn.sh', + project_id, _vpn_cert_subject(project_id)) with open(csr_fn, "r") as csrfile: csr_text = csrfile.read() (serial, signed_csr) = sign_csr(csr_text, project_id) @@ -259,9 +263,10 @@ def _sign_csr(csr_text, ca_folder): 
start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) - utils.execute("openssl ca -batch -out %s -config " - "./openssl.cnf -infiles %s" % (outbound, inbound)) - out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound) + utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config', + './openssl.cnf', '-infiles', inbound) + out, _err = utils.execute('openssl', 'x509', '-in', outbound, + '-serial', '-noout') serial = out.rpartition("=")[2] os.chdir(start) with open(outbound, "r") as crtfile: diff --git a/nova/db/api.py b/nova/db/api.py index d23f14a3c..aa86f0af1 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -80,10 +80,15 @@ def service_destroy(context, instance_id): def service_get(context, service_id): - """Get an service or raise if it does not exist.""" + """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) +def service_get_by_host_and_topic(context, host, topic): + """Get a service by the host it's on and the topic it listens to""" + return IMPL.service_get_by_host_and_topic(context, host, topic) + + def service_get_all(context, disabled=False): """Get all services.""" return IMPL.service_get_all(context, disabled) @@ -254,6 +259,28 @@ def floating_ip_get_by_address(context, address): #################### +def migration_update(context, id, values): + """Update a migration instance""" + return IMPL.migration_update(context, id, values) + + +def migration_create(context, values): + """Create a migration record""" + return IMPL.migration_create(context, values) + + +def migration_get(context, migration_id): + """Finds a migration by the id""" + return IMPL.migration_get(context, migration_id) + + +def migration_get_by_instance_and_status(context, instance_id, status): + """Finds a migration by the instance id it is migrating""" + return IMPL.migration_get_by_instance_and_status(context, instance_id, + status) + +#################### + def fixed_ip_associate(context, address, instance_id): """Associate fixed ip to instance. @@ -490,6 +517,13 @@ def network_create_safe(context, values): return IMPL.network_create_safe(context, values) +def network_delete_safe(context, network_id): + """Delete network with key network_id. 
+ This method assumes that the network is not associated with any project + """ + return IMPL.network_delete_safe(context, network_id) + + def network_create_fixed_ips(context, network_id, num_vpn_clients): """Create the ips for the network, reserving specified ips.""" return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients) @@ -526,6 +560,11 @@ def network_get_by_bridge(context, bridge): return IMPL.network_get_by_bridge(context, bridge) +def network_get_by_cidr(context, cidr): + """Get a network by cidr or raise if it does not exist""" + return IMPL.network_get_by_cidr(context, cidr) + + def network_get_by_instance(context, instance_id): """Get a network by instance id or raise if it does not exist.""" return IMPL.network_get_by_instance(context, instance_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 919dda118..3e94082df 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -155,6 +155,17 @@ def service_get_all_by_topic(context, topic): @require_admin_context +def service_get_by_host_and_topic(context, host, topic): + session = get_session() + return session.query(models.Service).\ + filter_by(deleted=False).\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + first() + + +@require_admin_context def service_get_all_by_host(context, host): session = get_session() return session.query(models.Service).\ @@ -1044,6 +1055,15 @@ def network_create_safe(context, values): @require_admin_context +def network_delete_safe(context, network_id): + session = get_session() + with session.begin(): + network_ref = network_get(context, network_id=network_id, \ + session=session) + session.delete(network_ref) + + +@require_admin_context def network_disassociate(context, network_id): network_update(context, network_id, {'project_id': None, 'host': None}) @@ -1117,6 +1137,18 @@ def network_get_by_bridge(context, bridge): @require_admin_context +def network_get_by_cidr(context, cidr): + session = get_session() + result = session.query(models.Network).\ + filter_by(cidr=cidr).first() + + if not result: + raise exception.NotFound(_('Network with cidr %s does not exist') % + cidr) + return result + + +@require_admin_context def network_get_by_instance(_context, instance_id): session = get_session() rv = session.query(models.Network).\ @@ -1972,6 +2004,51 @@ def host_get_networks(context, host): all() +################### + + +@require_admin_context +def migration_create(context, values): + migration = models.Migration() + migration.update(values) + migration.save() + return migration + + +@require_admin_context +def migration_update(context, id, values): + session = get_session() + with session.begin(): + migration = migration_get(context, id, session=session) + migration.update(values) + migration.save(session=session) + return migration + + +@require_admin_context +def migration_get(context, id, session=None): + if not session: + session = get_session() + result = session.query(models.Migration).\ + filter_by(id=id).first() + if not result: + raise exception.NotFound(_("No migration found with id %s") + % id) + return result + + +@require_admin_context +def migration_get_by_instance_and_status(context, instance_id, status): + session = get_session() + result = session.query(models.Migration).\ + filter_by(instance_id=instance_id).\ + filter_by(status=status).first() + if not result: + raise exception.NotFound(_("No migration found with instance id %s") + % instance_id) + return result + + 
################## diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py new file mode 100644 index 000000000..4fda525f1 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.from sqlalchemy import * + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# + +migrations = Table('migrations', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('source_compute', String(255)), + Column('dest_compute', String(255)), + Column('dest_host', String(255)), + Column('instance_id', Integer, ForeignKey('instances.id'), + nullable=True), + Column('status', String(255)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (migrations, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index 9bdaa6d6b..d9e303599 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -60,7 +60,7 @@ def db_version(): 'key_pairs', 'networks', 'projects', 'quotas', 'security_group_instance_association', 'security_group_rules', 'security_groups', - 'services', + 'services', 'migrations', 'users', 'user_project_association', 'user_project_role_association', 'user_role_association', diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 5e7c399ef..6ef284e65 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -389,6 +389,18 @@ class KeyPair(BASE, NovaBase): public_key = Column(Text) +class Migration(BASE, NovaBase): + """Represents a running host-to-host migration.""" + __tablename__ = 'migrations' + id = Column(Integer, primary_key=True, nullable=False) + source_compute = Column(String(255)) + dest_compute = Column(String(255)) + dest_host = Column(String(255)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + #TODO(_cerberus_): enum + status = Column(String(255)) + + class Network(BASE, NovaBase): """Represents a network.""" __tablename__ = 'networks' @@ -598,7 +610,7 @@ def register_models(): Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, Project, Certificate, ConsolePool, Console, Zone, - InstanceMetadata) + InstanceMetadata, Migration) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/exception.py b/nova/exception.py index 7d65bd6a5..93c5fe3d7 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -88,6 +88,10 @@ class InvalidInputException(Error): pass +class InvalidContentType(Error): + pass + + class TimeoutException(Error): pass diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index dd82a9366..a7dee8caf 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -48,7 +48,6 @@ class Exchange(object): nm = self.name LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)' ' %(message)s') % locals()) - routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: LOG.debug(_('Publishing to route %s'), f) diff --git a/nova/log.py b/nova/log.py index 87a21ddb4..d194ab8f0 100644 --- a/nova/log.py +++ b/nova/log.py @@ -266,7 +266,10 @@ class NovaRootLogger(NovaLogger): def handle_exception(type, value, tb): - logging.root.critical(str(value), exc_info=(type, value, tb)) + extra = {} + if FLAGS.verbose: + extra['exc_info'] = (type, value, tb) + logging.root.critical(str(value), **extra) def reset(): diff --git a/nova/network/api.py b/nova/network/api.py index bf43acb51..4ee1148cb 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -21,6 +21,7 @@ Handles all requests relating to instances (guest vms). 
""" from nova import db +from nova import exception from nova import flags from nova import log as logging from nova import quota diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 57e77f8d5..661299765 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -290,12 +290,14 @@ class IptablesManager(object): for cmd, tables in s: for table in tables: - current_table, _ = self.execute('sudo %s-save -t %s' % - (cmd, table), attempts=5) + current_table, _ = self.execute('sudo', + '%s-save' % (cmd,), + '-t', '%s' % (table,), + attempts=5) current_lines = current_table.split('\n') new_filter = self._modify_rules(current_lines, tables[table]) - self.execute('sudo %s-restore' % (cmd,), + self.execute('sudo', '%s-restore' % (cmd,), process_input='\n'.join(new_filter), attempts=5) @@ -371,7 +373,6 @@ def metadata_forward(): def init_host(): """Basic networking setup goes here""" - # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. iptables_manager.ipv4['nat'].add_rule("snat", @@ -392,15 +393,15 @@ def init_host(): def bind_floating_ip(floating_ip, check_exit_code=True): """Bind ip to public interface""" - _execute("sudo ip addr add %s dev %s" % (floating_ip, - FLAGS.public_interface), + _execute('sudo', 'ip', 'addr', 'add', floating_ip, + 'dev', FLAGS.public_interface, check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): """Unbind a public ip from public interface""" - _execute("sudo ip addr del %s dev %s" % (floating_ip, - FLAGS.public_interface)) + _execute('sudo', 'ip', 'addr', 'del', floating_ip, + 'dev', FLAGS.public_interface) def ensure_vlan_forward(public_ip, port, private_ip): @@ -448,9 +449,9 @@ def ensure_vlan(vlan_num): interface = "vlan%s" % vlan_num if not _device_exists(interface): LOG.debug(_("Starting VLAN inteface %s"), interface) - _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") - _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) - _execute("sudo ip link set %s up" % interface) + _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') + _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num) + _execute('sudo', 'ip', 'link', 'set', interface, 'up') return interface @@ -469,52 +470,57 @@ def ensure_bridge(bridge, interface, net_attrs=None): """ if not _device_exists(bridge): LOG.debug(_("Starting Bridge interface for %s"), interface) - _execute("sudo brctl addbr %s" % bridge) - _execute("sudo brctl setfd %s 0" % bridge) + _execute('sudo', 'brctl', 'addbr', bridge) + _execute('sudo', 'brctl', 'setfd', bridge, 0) # _execute("sudo brctl setageing %s 10" % bridge) - _execute("sudo brctl stp %s off" % bridge) - _execute("sudo ip link set %s up" % bridge) + _execute('sudo', 'brctl', 'stp', bridge, 'off') + _execute('sudo', 'ip', 'link', 'set', bridge, up) if net_attrs: # NOTE(vish): The ip for dnsmasq has to be the first address on the # bridge for it to respond to reqests properly suffix = net_attrs['cidr'].rpartition('/')[2] - out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" % - (net_attrs['gateway'], - suffix, - net_attrs['broadcast'], - bridge), + out, err = _execute('sudo', 'ip', 'addr', 'add', + "%s/%s" % + (net_attrs['gateway'], suffix), + 'brd', + net_attrs['broadcast'], + 'dev', + bridge, check_exit_code=False) if err and err != "RTNETLINK answers: File exists\n": raise exception.Error("Failed to add ip: %s" % err) if(FLAGS.use_ipv6): - _execute("sudo ip -f inet6 addr change %s dev %s" % - (net_attrs['cidr_v6'], 
bridge)) + _execute('sudo', 'ip', '-f', 'inet6', 'addr', + 'change', net_attrs['cidr_v6'], + 'dev', bridge) # NOTE(vish): If the public interface is the same as the # bridge, then the bridge has to be in promiscuous # to forward packets properly. if(FLAGS.public_interface == bridge): - _execute("sudo ip link set dev %s promisc on" % bridge) + _execute('sudo', 'ip', 'link', 'set', + 'dev', bridge, 'promisc', 'on') if interface: # NOTE(vish): This will break if there is already an ip on the # interface, so we move any ips to the bridge gateway = None - out, err = _execute("sudo route -n") + out, err = _execute('sudo', 'route', '-n') for line in out.split("\n"): fields = line.split() if fields and fields[0] == "0.0.0.0" and fields[-1] == interface: gateway = fields[1] - out, err = _execute("sudo ip addr show dev %s scope global" % - interface) + out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, + 'scope', 'global') for line in out.split("\n"): fields = line.split() if fields and fields[0] == "inet": params = ' '.join(fields[1:-1]) - _execute("sudo ip addr del %s dev %s" % (params, fields[-1])) - _execute("sudo ip addr add %s dev %s" % (params, bridge)) + _execute('sudo', 'ip', 'addr', + 'del', params, 'dev', fields[-1]) + _execute('sudo', 'ip', 'addr', + 'add', params, 'dev', bridge) if gateway: - _execute("sudo route add 0.0.0.0 gw %s" % gateway) - out, err = _execute("sudo brctl addif %s %s" % - (bridge, interface), + _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway) + out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, check_exit_code=False) if (err and err != "device %s is already a member of a bridge; can't " @@ -560,11 +566,11 @@ def update_dhcp(context, network_id): # if dnsmasq is already running, then tell it to reload if pid: - out, _err = _execute('cat /proc/%d/cmdline' % pid, + out, _err = _execute('cat', "/proc/%d/cmdline" % pid, check_exit_code=False) if conffile in out: try: - _execute('sudo kill -HUP %d' % pid) + _execute('sudo', 'kill', '-HUP', pid) return except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("Hupping dnsmasq threw %s"), exc) @@ -605,11 +611,11 @@ interface %s # if radvd is already running, then tell it to reload if pid: - out, _err = _execute('cat /proc/%d/cmdline' + out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: - _execute('sudo kill %d' % pid) + _execute('sudo', 'kill', pid) except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("killing radvd threw %s"), exc) else: @@ -630,18 +636,18 @@ def _host_dhcp(fixed_ip_ref): fixed_ip_ref['address']) -def _execute(cmd, *args, **kwargs): +def _execute(*cmd, **kwargs): """Wrapper around utils._execute for fake_network""" if FLAGS.fake_network: - LOG.debug("FAKE NET: %s", cmd) + LOG.debug("FAKE NET: %s", " ".join(map(str, cmd))) return "fake", 0 else: - return utils.execute(cmd, *args, **kwargs) + return utils.execute(*cmd, **kwargs) def _device_exists(device): """Check if ethernet device exists""" - (_out, err) = _execute("ip link show dev %s" % device, + (_out, err) = _execute('ip', 'link', 'show', 'dev', device, check_exit_code=False) return not err @@ -680,7 +686,7 @@ def _stop_dnsmasq(network): if pid: try: - _execute('sudo kill -TERM %d' % pid) + _execute('sudo', 'kill', '-TERM', pid) except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("Killing dnsmasq threw %s"), exc) diff --git a/nova/network/manager.py b/nova/network/manager.py index b36dd59cf..3dfc48934 100644 --- 
a/nova/network/manager.py +++ b/nova/network/manager.py @@ -563,6 +563,16 @@ class VlanManager(NetworkManager): # NOTE(vish): This makes ports unique across the cloud, a more # robust solution would be to make them unique per ip net['vpn_public_port'] = vpn_start + index + network_ref = None + try: + network_ref = db.network_get_by_cidr(context, cidr) + except exception.NotFound: + pass + + if network_ref is not None: + raise ValueError(_('Network with cidr %s already exists' % + cidr)) + network_ref = self.db.network_create_safe(context, net) if network_ref: self._create_fixed_ips(context, network_ref['id']) diff --git a/nova/rpc.py b/nova/rpc.py index 8fe4565dd..fbb90299b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -123,7 +123,7 @@ class Consumer(messaging.Consumer): LOG.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't - # exceptions to be logged 10 times a second if some + # want exceptions to be logged 10 times a second if some # persistent failure occurs. except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: diff --git a/nova/service.py b/nova/service.py index f47358089..af20db01c 100644 --- a/nova/service.py +++ b/nova/service.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -39,6 +40,7 @@ from nova import flags from nova import rpc from nova import utils from nova import version +from nova import wsgi FLAGS = flags.FLAGS @@ -48,6 +50,14 @@ flags.DEFINE_integer('report_interval', 10, flags.DEFINE_integer('periodic_interval', 60, 'seconds between running periodic tasks', lower_bound=1) +flags.DEFINE_string('ec2_listen', "0.0.0.0", + 'IP address for EC2 API to listen') +flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') +flags.DEFINE_string('osapi_listen', "0.0.0.0", + 'IP address for OpenStack API to listen') +flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') +flags.DEFINE_string('api_paste_config', "api-paste.ini", + 'File name for the paste.deploy config for nova-api') class Service(object): @@ -210,6 +220,41 @@ class Service(object): logging.exception(_("model server went away")) +class WsgiService(object): + """Base class for WSGI based services. 
+ + For each api you define, you must also define these flags: + :<api>_listen: The address on which to listen + :<api>_listen_port: The port on which to listen + """ + + def __init__(self, conf, apis): + self.conf = conf + self.apis = apis + self.wsgi_app = None + + def start(self): + self.wsgi_app = _run_wsgi(self.conf, self.apis) + + def wait(self): + self.wsgi_app.wait() + + +class ApiService(WsgiService): + """Class for our nova-api service""" + @classmethod + def create(cls, conf=None): + if not conf: + conf = wsgi.paste_config_file(FLAGS.api_paste_config) + if not conf: + message = (_("No paste configuration found for: %s") % + FLAGS.api_paste_config) + raise exception.Error(message) + api_endpoints = ['ec2', 'osapi'] + service = cls(conf, api_endpoints) + return service + + def serve(*services): try: if not services: @@ -239,3 +284,46 @@ def serve(*services): def wait(): while True: greenthread.sleep(5) + + +def serve_wsgi(cls, conf=None): + try: + service = cls.create(conf) + except Exception: + logging.exception('in WsgiService.create()') + raise + finally: + # After we've loaded up all our dynamic bits, check + # whether we should print help + flags.DEFINE_flag(flags.HelpFlag()) + flags.DEFINE_flag(flags.HelpshortFlag()) + flags.DEFINE_flag(flags.HelpXMLFlag()) + FLAGS.ParseNewFlags() + + service.start() + + return service + + +def _run_wsgi(paste_config_file, apis): + logging.debug(_("Using paste.deploy config at: %s"), paste_config_file) + apps = [] + for api in apis: + config = wsgi.load_paste_configuration(paste_config_file, api) + if config is None: + logging.debug(_("No paste configuration for app: %s"), api) + continue + logging.debug(_("App Config: %(api)s\n%(config)r") % locals()) + logging.info(_("Running %s API"), api) + app = wsgi.load_paste_app(paste_config_file, api) + apps.append((app, getattr(FLAGS, "%s_listen_port" % api), + getattr(FLAGS, "%s_listen" % api))) + if len(apps) == 0: + logging.error(_("No known API applications configured in %s."), + paste_config_file) + return + + server = wsgi.Server() + for app in apps: + server.start(*app) + return server diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py new file mode 100644 index 000000000..74bb8729a --- /dev/null +++ b/nova/tests/api/openstack/common.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +import json + +import webob + + +def webob_factory(url): + """Factory for removing duplicate webob code from tests""" + + base_url = url + + def web_request(url, method=None, body=None): + req = webob.Request.blank("%s%s" % (base_url, url)) + if method: + req.content_type = "application/json" + req.method = method + if body: + req.body = json.dumps(body) + return req + return web_request diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 92023362c..8f57c5b67 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -79,20 +79,14 @@ class LimiterTest(test.TestCase): Test offset key works with a blank offset. """ req = Request.blank('/?offset=') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium) - self.assertEqual(limited(self.large, req), self.large[:1000]) + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) def test_limiter_offset_bad(self): """ Test offset key works with a BAD offset. """ req = Request.blank(u'/?offset=\u0020aa') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium) - self.assertEqual(limited(self.large, req), self.large[:1000]) + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) def test_limiter_nothing(self): """ @@ -166,18 +160,12 @@ class LimiterTest(test.TestCase): """ Test a negative limit. """ - def _limit_large(): - limited(self.large, req, max_limit=2000) - req = Request.blank('/?limit=-3000') - self.assertRaises(webob.exc.HTTPBadRequest, _limit_large) + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) def test_limiter_negative_offset(self): """ Test a negative offset. """ - def _limit_large(): - limited(self.large, req, max_limit=2000) - req = Request.blank('/?offset=-30') - self.assertRaises(webob.exc.HTTPBadRequest, _limit_large) + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 78beb7df9..c1e05b18a 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 OpenStack LLC. +# Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -26,10 +26,12 @@ from nova import flags from nova import test import nova.api.openstack from nova.api.openstack import servers +import nova.compute.api import nova.db.api from nova.db.sqlalchemy.models import Instance from nova.db.sqlalchemy.models import InstanceMetadata import nova.rpc +from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes @@ -144,6 +146,8 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api) self.allow_admin = FLAGS.allow_admin_api + self.webreq = common.webob_factory('/v1.0/servers') + def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin @@ -184,9 +188,37 @@ class ServersTest(test.TestCase): self.assertEqual(s.get('imageId', None), None) i += 1 + def test_get_servers_with_limit(self): + req = webob.Request.blank('/v1.0/servers?limit=3') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [0, 1, 2]) + + req = webob.Request.blank('/v1.0/servers?limit=aaa') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue('limit' in res.body) + + def test_get_servers_with_offset(self): + req = webob.Request.blank('/v1.0/servers?offset=2') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [2, 3, 4]) + + req = webob.Request.blank('/v1.0/servers?offset=aaa') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue('offset' in res.body) + + def test_get_servers_with_limit_and_offset(self): + req = webob.Request.blank('/v1.0/servers?limit=2&offset=1') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [1, 2]) + def test_create_instance(self): def instance_create(context, inst): - return {'id': '1', 'display_name': ''} + return {'id': '1', 'display_name': 'server_test'} def server_update(context, id, params): return instance_create(context, id) @@ -227,9 +259,16 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.0/servers') req.method = 'POST' req.body = json.dumps(body) + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) + server = json.loads(res.body)['server'] + self.assertEqual('serv', server['adminPass'][:4]) + self.assertEqual(16, len(server['adminPass'])) + self.assertEqual('server_test', server['name']) + self.assertEqual('1', server['id']) + self.assertEqual(res.status_int, 200) def test_update_no_body(self): @@ -465,3 +504,99 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) + + def test_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_resize_bad_flavor_fails(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + 
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 422) + self.assertEqual(self.resize_called, False) + + def test_resize_raises_fails(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) + + def resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_confirm_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) + + self.resize_called = False + + def confirm_resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'confirm_resize', + confirm_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 204) + self.assertEqual(self.resize_called, True) + + def test_confirm_resize_server_fails(self): + req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) + + def confirm_resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'confirm_resize', + confirm_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_revert_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(revertResize=None)) + + self.resize_called = False + + def revert_resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'revert_resize', + revert_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_revert_resize_server_fails(self): + req = self.webreq('/1/action', 'POST', dict(revertResize=None)) + + def revert_resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'revert_resize', + revert_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + +if __name__ == "__main__": + unittest.main() diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 555b206b9..d0da8eaaf 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -86,24 +86,27 @@ class ZonesTest(test.TestCase): def test_get_zone_list(self): req = webob.Request.blank('/v1.0/zones') + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) self.assertEqual(len(res_dict['zones']), 2) def test_get_zone_by_id(self): req = webob.Request.blank('/v1.0/zones/1') + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') self.assertFalse('password' in res_dict['zone']) - self.assertEqual(res.status_int, 200) def test_zone_delete(self): req = webob.Request.blank('/v1.0/zones/1') + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) @@ -112,13 +115,14 @@ class ZonesTest(test.TestCase): body = dict(zone=dict(api_url='http://blah.zoo', username='fred', password='fubar')) req = webob.Request.blank('/v1.0/zones') + req.headers["Content-Type"] = 
"application/json" req.method = 'POST' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo') self.assertFalse('username' in res_dict['zone']) @@ -126,13 +130,14 @@ class ZonesTest(test.TestCase): def test_zone_update(self): body = dict(zone=dict(username='zeb', password='sneaky')) req = webob.Request.blank('/v1.0/zones/1') + req.headers["Content-Type"] = "application/json" req.method = 'PUT' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') self.assertFalse('username' in res_dict['zone']) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 2c7852214..b1a849cf9 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -21,11 +21,13 @@ Test WSGI basics and provide some helper functions for other WSGI tests. """ +import json from nova import test import routes import webob +from nova import exception from nova import wsgi @@ -66,63 +68,164 @@ class Test(test.TestCase): result = webob.Request.blank('/bad').get_response(Router()) self.assertNotEqual(result.body, "Router result") - def test_controller(self): - class Controller(wsgi.Controller): - """Test controller to call from router.""" - test = self +class ControllerTest(test.TestCase): - def show(self, req, id): # pylint: disable-msg=W0622,C0103 - """Default action called for requests with an ID.""" - self.test.assertEqual(req.path_info, '/tests/123') - self.test.assertEqual(id, '123') - return id + class TestRouter(wsgi.Router): - class Router(wsgi.Router): - """Test router.""" + class TestController(wsgi.Controller): - def __init__(self): - mapper = routes.Mapper() - mapper.resource("test", "tests", controller=Controller()) - super(Router, self).__init__(mapper) + _serialization_metadata = { + 'application/xml': { + "attributes": { + "test": ["id"]}}} - result = webob.Request.blank('/tests/123').get_response(Router()) - self.assertEqual(result.body, "123") - result = webob.Request.blank('/test/123').get_response(Router()) - self.assertNotEqual(result.body, "123") + def show(self, req, id): # pylint: disable-msg=W0622,C0103 + return {"test": {"id": id}} + + def __init__(self): + mapper = routes.Mapper() + mapper.resource("test", "tests", controller=self.TestController()) + wsgi.Router.__init__(self, mapper) + + def test_show(self): + request = wsgi.Request.blank('/tests/123') + result = request.get_response(self.TestRouter()) + self.assertEqual(json.loads(result.body), {"test": {"id": "123"}}) + + def test_response_content_type_from_accept_xml(self): + request = webob.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/xml") + + def test_response_content_type_from_accept_json(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/json") + + def test_response_content_type_from_query_extension_xml(self): + request = 
wsgi.Request.blank('/tests/123.xml') + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/xml") + + def test_response_content_type_from_query_extension_json(self): + request = wsgi.Request.blank('/tests/123.json') + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/json") + + def test_response_content_type_default_when_unsupported(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.status_int, 200) + self.assertEqual(result.headers["Content-Type"], "application/json") + + +class RequestTest(test.TestCase): + + def test_request_content_type_missing(self): + request = wsgi.Request.blank('/tests/123') + request.body = "<body />" + self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) + + def test_request_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "text/html" + request.body = "asdf<br />" + self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) + + def test_content_type_from_accept_xml(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml, application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = \ + "application/json; q=0.3, application/xml; q=0.9" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_from_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") class SerializerTest(test.TestCase): - def match(self, url, accept, expect): + def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = '<servers><a>(2,3)</a></servers>' + serializer = wsgi.Serializer() + result = serializer.serialize(input_dict, "application/xml") + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_xml) + + def test_json(self): + input_dict = dict(servers=dict(a=(2, 3))) expected_json = '{"servers":{"a":[2,3]}}' - 
req = webob.Request.blank(url, headers=dict(Accept=accept)) - result = wsgi.Serializer(req.environ).to_content_type(input_dict) + serializer = wsgi.Serializer() + result = serializer.serialize(input_dict, "application/json") result = result.replace('\n', '').replace(' ', '') - if expect == 'xml': - self.assertEqual(result, expected_xml) - elif expect == 'json': - self.assertEqual(result, expected_json) - else: - raise "Bad expect value" - - def test_basic(self): - self.match('/servers/4.json', None, expect='json') - self.match('/servers/4', 'application/json', expect='json') - self.match('/servers/4', 'application/xml', expect='xml') - self.match('/servers/4.xml', None, expect='xml') - - def test_defaults_to_json(self): - self.match('/servers/4', None, expect='json') - self.match('/servers/4', 'text/html', expect='json') - - def test_suffix_takes_precedence_over_accept_header(self): - self.match('/servers/4.xml', 'application/json', expect='xml') - self.match('/servers/4.xml.', 'application/json', expect='json') - - def test_deserialize(self): + self.assertEqual(result, expected_json) + + def test_unsupported_content_type(self): + serializer = wsgi.Serializer() + self.assertRaises(exception.InvalidContentType, serializer.serialize, + {}, "text/null") + + def test_deserialize_json(self): + data = """{"a": { + "a1": "1", + "a2": "2", + "bs": ["1", "2", "3", {"c": {"c1": "1"}}], + "d": {"e": "1"}, + "f": "1"}}""" + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + metadata = {} + serializer = wsgi.Serializer(metadata) + self.assertEqual(serializer.deserialize(data, "application/json"), + as_dict) + + def test_deserialize_xml(self): xml = """ <a a1="1" a2="2"> <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs> @@ -137,11 +240,13 @@ class SerializerTest(test.TestCase): 'd': {'e': '1'}, 'f': '1'}) metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})} - serializer = wsgi.Serializer({}, metadata) - self.assertEqual(serializer.deserialize(xml), as_dict) + serializer = wsgi.Serializer(metadata) + self.assertEqual(serializer.deserialize(xml, "application/xml"), + as_dict) def test_deserialize_empty_xml(self): xml = """<a></a>""" as_dict = {"a": {}} - serializer = wsgi.Serializer({}) - self.assertEqual(serializer.deserialize(xml), as_dict) + serializer = wsgi.Serializer() + self.assertEqual(serializer.deserialize(xml, "application/xml"), + as_dict) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 3ff8d7ce5..5872552ec 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -35,23 +35,28 @@ class FakeGlance(object): IMAGE_FIXTURES = { IMAGE_MACHINE: { 'image_meta': {'name': 'fakemachine', 'size': 0, - 'type': 'machine'}, + 'disk_format': 'ami', + 'container_format': 'ami'}, 'image_data': StringIO.StringIO('')}, IMAGE_KERNEL: { 'image_meta': {'name': 'fakekernel', 'size': 0, - 'type': 'kernel'}, + 'disk_format': 'aki', + 'container_format': 'aki'}, 'image_data': StringIO.StringIO('')}, IMAGE_RAMDISK: { 'image_meta': {'name': 'fakeramdisk', 'size': 0, - 'type': 'ramdisk'}, + 'disk_format': 'ari', + 'container_format': 'ari'}, 'image_data': StringIO.StringIO('')}, IMAGE_RAW: { 'image_meta': {'name': 'fakeraw', 'size': 0, - 'type': 'raw'}, + 'disk_format': 'raw', + 'container_format': 'bare'}, 'image_data': StringIO.StringIO('')}, IMAGE_VHD: { 'image_meta': {'name': 'fakevhd', 'size': 0, - 'type': 'vhd'}, + 'disk_format': 'vhd', + 'container_format': 'ovf'}, 'image_data': 
StringIO.StringIO('')}} def __init__(self, host, port=None, use_ssl=False): diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 061910013..b195fa520 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -267,7 +267,7 @@ class CloudTestCase(test.TestCase): self._create_key('test1') self._create_key('test2') result = self.cloud.describe_key_pairs(self.context) - keys = result["keypairsSet"] + keys = result["keySet"] self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 949b5e6eb..58493d7ac 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -57,7 +57,7 @@ class ComputeTestCase(test.TestCase): self.manager.delete_project(self.project) super(ComputeTestCase, self).tearDown() - def _create_instance(self): + def _create_instance(self, params={}): """Create a test instance""" inst = {} inst['image_id'] = 'ami-test' @@ -68,6 +68,7 @@ class ComputeTestCase(test.TestCase): inst['instance_type'] = 'm1.tiny' inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 + inst.update(params) return db.instance_create(self.context, inst)['id'] def _create_group(self): @@ -268,9 +269,30 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(self.context, instance_id) + def test_resize_instance(self): + """Ensure instance can be migrated/resized""" + instance_id = self._create_instance() + context = self.context.elevated() + self.compute.run_instance(self.context, instance_id) + db.instance_update(self.context, instance_id, {'host': 'foo'}) + self.compute.prep_resize(context, instance_id) + migration_ref = db.migration_get_by_instance_and_status(context, + instance_id, 'pre-migrating') + self.compute.resize_instance(context, instance_id, + migration_ref['id']) + self.compute.terminate_instance(context, instance_id) + def test_get_by_flavor_id(self): type = instance_types.get_by_flavor_id(1) self.assertEqual(type, 'm1.tiny') + def test_resize_same_source_fails(self): + """Ensure instance fails to migrate when source and destination are + the same host""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.assertRaises(exception.Error, self.compute.prep_resize, + self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) type = instance_types.get_by_flavor_id("1") self.assertEqual(type, 'm1.tiny') diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index b6bfab534..85bfcfd85 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -59,6 +59,7 @@ class DirectTestCase(test.TestCase): req.headers['X-OpenStack-User'] = 'user1' req.headers['X-OpenStack-Project'] = 'proj1' resp = req.get_response(self.auth_router) + self.assertEqual(resp.status_int, 200) data = json.loads(resp.body) self.assertEqual(data['user'], 'user1') self.assertEqual(data['project'], 'proj1') @@ -69,6 +70,7 @@ class DirectTestCase(test.TestCase): req.method = 'POST' req.body = 'json=%s' % json.dumps({'data': 'foo'}) resp = req.get_response(self.router) + self.assertEqual(resp.status_int, 200) resp_parsed = json.loads(resp.body) self.assertEqual(resp_parsed['data'], 'foo') @@ -78,6 +80,7 @@ class DirectTestCase(test.TestCase): req.method = 'POST' req.body = 'data=foo' resp = req.get_response(self.router) + self.assertEqual(resp.status_int, 200) resp_parsed = json.loads(resp.body) 
self.assertEqual(resp_parsed['data'], 'foo') diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index 9f572b58e..a658e4978 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -71,30 +71,33 @@ class LockTestCase(test.TestCase): "got mangled") def test_synchronized(self): - rpipe, wpipe = os.pipe() + rpipe1, wpipe1 = os.pipe() + rpipe2, wpipe2 = os.pipe() + + @synchronized('testlock') + def f(rpipe, wpipe): + try: + os.write(wpipe, "foo") + except OSError, e: + self.assertEquals(e.errno, errno.EPIPE) + return + + rfds, _, __ = select.select([rpipe], [], [], 1) + self.assertEquals(len(rfds), 0, "The other process, which was" + " supposed to be locked, " + "wrote on its end of the " + "pipe") + os.close(rpipe) + pid = os.fork() if pid > 0: - os.close(wpipe) - - @synchronized('testlock') - def f(): - rfds, _, __ = select.select([rpipe], [], [], 1) - self.assertEquals(len(rfds), 0, "The other process, which was" - " supposed to be locked, " - "wrote on its end of the " - "pipe") - os.close(rpipe) - - f() + os.close(wpipe1) + os.close(rpipe2) + + f(rpipe1, wpipe2) else: - os.close(rpipe) + os.close(rpipe1) + os.close(wpipe2) - @synchronized('testlock') - def g(): - try: - os.write(wpipe, "foo") - except OSError, e: - self.assertEquals(e.errno, errno.EPIPE) - return - g() + f(rpipe2, wpipe1) os._exit(0) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 70080db38..53e35ce7e 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -485,13 +485,13 @@ def lease_ip(private_ip): private_ip) instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip) - cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) + cmd = (binpath('nova-dhcpbridge'), 'add', + instance_ref['mac_address'], + private_ip, 'fake') env = {'DNSMASQ_INTERFACE': network_ref['bridge'], 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) + (out, err) = utils.execute(*cmd, addl_env=env) LOG.debug("ISSUE_IP: %s, %s ", out, err) @@ -501,11 +501,11 @@ def release_ip(private_ip): private_ip) instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip) - cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) + cmd = (binpath('nova-dhcpbridge'), 'del', + instance_ref['mac_address'], + private_ip, 'fake') env = {'DNSMASQ_INTERFACE': network_ref['bridge'], 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) + (out, err) = utils.execute(*cmd, addl_env=env) LOG.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index b2a937a4e..93ed5b675 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -322,23 +322,25 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) # self.fw.add_instance(instance_ref) - def fake_iptables_execute(cmd, process_input=None, attempts=5): - if cmd == 'sudo ip6tables-save -t filter': + def fake_iptables_execute(*cmd, **kwargs): + process_input = kwargs.get('process_input', None) + if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'): return '\n'.join(self.in6_filter_rules), None - if cmd == 'sudo iptables-save -t filter': + if cmd == ('sudo', 'iptables-save', '-t', 'filter'): return '\n'.join(self.in_filter_rules), None - if cmd == 'sudo iptables-save -t nat': + if cmd == ('sudo', 'iptables-save', 
'-t', 'nat'): return '\n'.join(self.in_nat_rules), None - if cmd == 'sudo iptables-restore': + if cmd == ('sudo', 'iptables-restore'): lines = process_input.split('\n') if '*filter' in lines: self.out_rules = lines return '', '' - if cmd == 'sudo ip6tables-restore': + if cmd == ('sudo', 'ip6tables-restore'): lines = process_input.split('\n') if '*filter' in lines: self.out6_rules = lines return '', '' + print cmd, kwargs from nova.network import linux_net linux_net.iptables_manager.execute = fake_iptables_execute diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 106c0bd6f..c26dc8639 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -346,6 +346,56 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): super(XenAPIDiffieHellmanTestCase, self).tearDown() +class XenAPIMigrateInstance(test.TestCase): + """ + Unit test for verifying migration-related actions + """ + + def setUp(self): + super(XenAPIMigrateInstance, self).setUp() + self.stubs = stubout.StubOutForTesting() + FLAGS.target_host = '127.0.0.1' + FLAGS.xenapi_connection_url = 'test_url' + FLAGS.xenapi_connection_password = 'test_pass' + db_fakes.stub_out_db_instance_api(self.stubs) + stubs.stub_out_get_target(self.stubs) + xenapi_fake.reset() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.values = {'name': 1, 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': None, + 'ramdisk_id': None, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + stubs.stub_out_migration_methods(self.stubs) + glance_stubs.stubout_glance_client(self.stubs, + glance_stubs.FakeGlance) + + def tearDown(self): + super(XenAPIMigrateInstance, self).tearDown() + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + self.stubs.UnsetAll() + + def test_migrate_disk_and_power_off(self): + instance = db.instance_create(self.values) + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + conn = xenapi_conn.get_connection(False) + conn.migrate_disk_and_power_off(instance, '127.0.0.1') + + def test_finish_resize(self): + instance = db.instance_create(self.values) + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + conn = xenapi_conn.get_connection(False) + conn.finish_resize(instance, dict(base_copy='hurr', cow='durr')) + + class XenAPIDetermineDiskImageTestCase(test.TestCase): """ Unit tests for code that detects the ImageType diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 3de73d617..70d46a1fb 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -20,6 +20,7 @@ from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi import vm_utils +from nova.virt.xenapi import vmops def stubout_instance_snapshot(stubs): @@ -217,3 +218,60 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): def SR_forget(self, _1, ref): pass + + +class FakeSessionForMigrationTests(fake.SessionBase): + """Stubs out a XenAPISession for Migration tests""" + def __init__(self, uri): + super(FakeSessionForMigrationTests, self).__init__(uri) + + def VDI_get_by_uuid(*args): + return 'hurr' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 
'Halted',
+                                vm['power_state']])
+        vm['power_state'] = 'Running'
+        vm['is_a_template'] = False
+        vm['is_control_domain'] = False
+
+
+def stub_out_migration_methods(stubs):
+    def fake_get_snapshot(self, instance):
+        return 'foo', 'bar'
+
+    @classmethod
+    def fake_get_vdi(cls, session, vm_ref):
+        vdi_ref = fake.create_vdi(name_label='derp', read_only=False,
+                                  sr_ref='herp', sharable=False)
+        vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+        return vdi_ref, {'uuid': vdi_rec['uuid'], }
+
+    def fake_shutdown(self, inst, vm, method='clean'):
+        pass
+
+    @classmethod
+    def fake_sr(cls, session, *args):
+        pass
+
+    @classmethod
+    def fake_get_sr_path(cls, *args):
+        return "fake"
+
+    def fake_destroy(*args, **kwargs):
+        pass
+
+    def fake_reset_network(*args, **kwargs):
+        pass
+
+    stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
+    stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
+    stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
+    stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot)
+    stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi)
+    stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None)
+    stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)
+    stubs.Set(vmops.VMOps, 'reset_network', fake_reset_network)
+    stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown)
diff --git a/nova/utils.py b/nova/utils.py
index 80e5b6bbe..3008a512e 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -42,7 +42,7 @@ from xml.sax import saxutils
 from eventlet import event
 from eventlet import greenthread
 from eventlet.green import subprocess
-
+import types
 from nova import exception
 from nova.exception import ProcessExecutionError
 from nova import flags
@@ -129,20 +129,28 @@ def fetchfile(url, target):
 #        c.perform()
 #        c.close()
 #        fp.close()
-    execute("curl --fail %s -o %s" % (url, target))
+    execute("curl", "--fail", url, "-o", target)
+
 
+def execute(*cmd, **kwargs):
+    process_input = kwargs.get('process_input', None)
+    addl_env = kwargs.get('addl_env', None)
+    check_exit_code = kwargs.get('check_exit_code', 0)
+    stdin = kwargs.get('stdin', subprocess.PIPE)
+    stdout = kwargs.get('stdout', subprocess.PIPE)
+    stderr = kwargs.get('stderr', subprocess.PIPE)
+    attempts = kwargs.get('attempts', 1)
+    cmd = map(str, cmd)
 
-def execute(cmd, process_input=None, addl_env=None, check_exit_code=True,
-            attempts=1):
     while attempts > 0:
         attempts -= 1
         try:
-            LOG.debug(_("Running cmd (subprocess): %s"), cmd)
+            LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd))
             env = os.environ.copy()
             if addl_env:
                 env.update(addl_env)
-            obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
-                stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
+            obj = subprocess.Popen(cmd, stdin=stdin,
+                                   stdout=stdout, stderr=stderr, env=env)
             result = None
             if process_input != None:
                 result = obj.communicate(process_input)
@@ -151,15 +159,16 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True,
             obj.stdin.close()
             if obj.returncode:
                 LOG.debug(_("Result was %s") % obj.returncode)
-                if check_exit_code and obj.returncode != 0:
+                if type(check_exit_code) == types.IntType \
+                        and obj.returncode != check_exit_code:
                     (stdout, stderr) = result
                     raise ProcessExecutionError(exit_code=obj.returncode,
                                                 stdout=stdout,
                                                 stderr=stderr,
-                                                cmd=cmd)
-            # NOTE(termie): this appears to be necessary to let the subprocess
-            #               call clean something up in between calls, without
-            #               it two execute calls in a row hangs the second one
+                                                cmd=' '.join(cmd))
+            # NOTE(termie): this appears to be necessary to let the 
subprocess call
+            #               clean something up in between calls, without it two
+            #               execute calls in a row hangs the second one
             greenthread.sleep(0)
             return result
         except ProcessExecutionError:
@@ -172,7 +181,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True,
 
 def ssh_execute(ssh, cmd, process_input=None,
                 addl_env=None, check_exit_code=True):
-    LOG.debug(_("Running cmd (SSH): %s"), cmd)
+    LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd))
     if addl_env:
         raise exception.Error("Environment not supported over SSH")
 
@@ -201,7 +210,7 @@ def ssh_execute(ssh, cmd, process_input=None,
         raise exception.ProcessExecutionError(exit_code=exit_status,
                                               stdout=stdout,
                                               stderr=stderr,
-                                              cmd=cmd)
+                                              cmd=' '.join(cmd))
 
     return (stdout, stderr)
 
@@ -234,9 +243,9 @@ def debug(arg):
     return arg
 
 
-def runthis(prompt, cmd, check_exit_code=True):
-    LOG.debug(_("Running %s"), (cmd))
-    rv, err = execute(cmd, check_exit_code=check_exit_code)
+def runthis(prompt, *cmd, **kwargs):
+    LOG.debug(_("Running %s"), (" ".join(cmd)))
+    rv, err = execute(*cmd, **kwargs)
 
 
 def generate_uid(topic, size=8):
@@ -268,7 +277,7 @@ def last_octet(address):
 
 def get_my_linklocal(interface):
     try:
-        if_str = execute("ip -f inet6 -o addr show %s" % interface)
+        if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface)
         condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link"
         links = [re.search(condition, x) for x in if_str[0].split('\n')]
         address = [w.group(1) for w in links if w is not None]
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index 2bded07a4..a54cda003 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -49,10 +49,10 @@ def extend(image, size):
     file_size = os.path.getsize(image)
     if file_size >= size:
         return
-    utils.execute('truncate -s %s %s' % (size, image))
+    utils.execute('truncate', '-s', size, image)
     # NOTE(vish): attempts to resize filesystem
-    utils.execute('e2fsck -fp %s' % image, check_exit_code=False)
-    utils.execute('resize2fs %s' % image, check_exit_code=False)
+    utils.execute('e2fsck', '-fp', image, check_exit_code=False)
+    utils.execute('resize2fs', image, check_exit_code=False)
 
 
 def inject_data(image, key=None, net=None, partition=None, nbd=False):
@@ -68,7 +68,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
     try:
         if not partition is None:
             # create partition
-            out, err = utils.execute('sudo kpartx -a %s' % device)
+            out, err = utils.execute('sudo', 'kpartx', '-a', device)
             if err:
                 raise exception.Error(_('Failed to load partition: %s') % err)
             mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
@@ -84,13 +84,14 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
                                                    mapped_device)
 
         # Configure ext2fs so that it doesn't auto-check every N boots
-        out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
+        out, err = utils.execute('sudo', 'tune2fs',
+                                 '-c', 0, '-i', 0, mapped_device)
 
         tmpdir = tempfile.mkdtemp()
         try:
             # mount loopback to dir
             out, err = utils.execute(
-                'sudo mount %s %s' % (mapped_device, tmpdir))
+                'sudo', 'mount', mapped_device, tmpdir)
             if err:
                 raise exception.Error(_('Failed to mount filesystem: %s')
                                       % err)
@@ -103,13 +104,13 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
                 _inject_net_into_fs(net, tmpdir)
         finally:
             # unmount device
-            utils.execute('sudo umount %s' % mapped_device)
+            utils.execute('sudo', 'umount', mapped_device)
     finally:
         # remove temporary directory
-        utils.execute('rmdir %s' % tmpdir)
+        utils.execute('rmdir', tmpdir)
         if not partition is None:
             # remove partitions
-            
utils.execute('sudo kpartx -d %s' % device)
+            utils.execute('sudo', 'kpartx', '-d', device)
     finally:
         _unlink_device(device, nbd)
 
@@ -118,7 +119,7 @@ def _link_device(image, nbd):
     """Link image to device using loopback or nbd"""
     if nbd:
         device = _allocate_device()
-        utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
+        utils.execute('sudo', 'qemu-nbd', '-c', device, image)
         # NOTE(vish): this forks into another process, so give it a chance
         #             to set up before continuuing
         for i in xrange(FLAGS.timeout_nbd):
@@ -127,7 +128,7 @@ def _link_device(image, nbd):
             time.sleep(1)
         raise exception.Error(_('nbd device %s did not show up') % device)
     else:
-        out, err = utils.execute('sudo losetup --find --show %s' % image)
+        out, err = utils.execute('sudo', 'losetup', '--find', '--show', image)
         if err:
             raise exception.Error(_('Could not attach image to loopback: %s')
                                   % err)
@@ -137,10 +138,10 @@ def _link_device(image, nbd):
 def _unlink_device(device, nbd):
     """Unlink image from device using loopback or nbd"""
     if nbd:
-        utils.execute('sudo qemu-nbd -d %s' % device)
+        utils.execute('sudo', 'qemu-nbd', '-d', device)
         _free_device(device)
     else:
-        utils.execute('sudo losetup --detach %s' % device)
+        utils.execute('sudo', 'losetup', '--detach', device)
 
 
 _DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)]
@@ -170,11 +171,12 @@ def _inject_key_into_fs(key, fs):
     fs is the path to the base of the filesystem into which to inject the key.
     """
     sshdir = os.path.join(fs, 'root', '.ssh')
-    utils.execute('sudo mkdir -p %s' % sshdir)  # existing dir doesn't matter
-    utils.execute('sudo chown root %s' % sshdir)
-    utils.execute('sudo chmod 700 %s' % sshdir)
+    utils.execute('sudo', 'mkdir', '-p', sshdir)  # existing dir doesn't matter
+    utils.execute('sudo', 'chown', 'root', sshdir)
+    utils.execute('sudo', 'chmod', '700', sshdir)
     keyfile = os.path.join(sshdir, 'authorized_keys')
-    utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+    utils.execute('sudo', 'tee', '-a', keyfile,
+                  process_input='\n' + key.strip() + '\n')
 
 
 def _inject_net_into_fs(net, fs):
@@ -183,8 +185,8 @@ def _inject_net_into_fs(net, fs):
     net is the contents of /etc/network/interfaces.
     """
     netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
-    utils.execute('sudo mkdir -p %s' % netdir)  # existing dir doesn't matter
-    utils.execute('sudo chown root:root %s' % netdir)
-    utils.execute('sudo chmod 755 %s' % netdir)
+    utils.execute('sudo', 'mkdir', '-p', netdir)  # existing dir doesn't matter
+    utils.execute('sudo', 'chown', 'root:root', netdir)
+    utils.execute('sudo', 'chmod', 755, netdir)
     netfile = os.path.join(netdir, 'interfaces')
-    utils.execute('sudo tee %s' % netfile, net)
+    utils.execute('sudo', 'tee', netfile, process_input=net)
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 4346dffc1..c744acf91 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -139,6 +139,24 @@ class FakeConnection(object):
         """
         pass
 
+    def get_host_ip_addr(self):
+        """
+        Retrieves the IP address of the dom0
+        """
+        pass
+
+    def resize(self, instance, flavor):
+        """
+        Resizes/Migrates the specified instance.
+
+        The flavor parameter determines whether or not the instance RAM and
+        disk space are modified, and if so, to what size.
+
+        The work will be done asynchronously. This function returns a task
+        that allows the caller to detect when it is complete.
+        """
+        pass
+
     def set_admin_password(self, instance, new_pass):
         """
         Set the root password on the specified instance.
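A pattern worth noting across the disk.py hunks above (and most of this merge): utils.execute() now takes the command argv-style, one argument per element, instead of a single shell-interpolated string, which drops shell=True and its quoting hazards. The tee conversions also show that data fed to stdin must now be passed as process_input= rather than as a bare second positional argument. Two sketch calls under the new convention (the file paths are illustrative):

    from nova import utils

    # Default check_exit_code=0: raises ProcessExecutionError on any
    # nonzero exit status.
    out, err = utils.execute('cat', '/etc/hosts')

    # check_exit_code=False disables the check entirely: the gate is
    # type(check_exit_code) == types.IntType, and bool is not int there.
    utils.execute('e2fsck', '-fp', '/tmp/disk.img', check_exit_code=False)

Non-string arguments (such as the bare 0 and 755 above) are safe because execute() applies map(str, cmd) before handing the list to subprocess.Popen.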
@@ -179,6 +197,19 @@ class FakeConnection(object):
         """
         pass
 
+    def migrate_disk_and_power_off(self, instance, dest):
+        """
+        Transfers the disk of a running instance in multiple phases, turning
+        off the instance before the end.
+        """
+        pass
+
+    def attach_disk(self, instance, disk_info):
+        """
+        Attaches the disk to an instance given the metadata disk_info
+        """
+        pass
+
     def pause(self, instance, callback):
         """
         Pause the specified instance.
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 7a6fef330..4b11d1667 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -94,8 +94,7 @@ def _fetch_s3_image(image, path, user, project):
         cmd += ['-H', '\'%s: %s\'' % (k, v)]
 
     cmd += ['-o', path]
-    cmd_out = ' '.join(cmd)
-    return utils.execute(cmd_out)
+    return utils.execute(*cmd)
 
 
 def _fetch_local_image(image, path, user, project):
@@ -103,7 +102,7 @@ def _fetch_local_image(image, path, user, project):
     if sys.platform.startswith('win'):
         return shutil.copy(source, path)
     else:
-        return utils.execute('cp %s %s' % (source, path))
+        return utils.execute('cp', source, path)
 
 
 def _image_path(path):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 825bcb0d7..b7167c865 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -436,8 +436,10 @@ class LibvirtConnection(object):
 
         if virsh_output.startswith('/dev/'):
             LOG.info(_("cool, it's a device"))
-            out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
-                                     virsh_output, check_exit_code=False)
+            out, err = utils.execute('sudo', 'dd',
+                                     "if=%s" % virsh_output,
+                                     'iflag=nonblock',
+                                     check_exit_code=False)
             return out
         else:
             return ''
@@ -459,11 +461,11 @@ class LibvirtConnection(object):
         console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                    'console.log')
 
-        utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
+        utils.execute('sudo', 'chown', os.getuid(), console_log)
 
         if FLAGS.libvirt_type == 'xen':
             # Xen is special
-            virsh_output = utils.execute("virsh ttyconsole %s" %
+            virsh_output = utils.execute('virsh', 'ttyconsole',
                                          instance['name'])
             data = self._flush_xen_console(virsh_output)
             fpath = self._append_to_file(data, console_log)
@@ -480,9 +482,10 @@ class LibvirtConnection(object):
             port = random.randint(int(start_port), int(end_port))
             # netcat will exit with 0 only if the port is in use,
             # so a nonzero return value implies it is unused
-            cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
-            stdout, stderr = utils.execute(cmd)
-            if stdout.strip() == 'free':
+            cmd = 'netcat', '0.0.0.0', port, '-w', '1'
+            try:
+                stdout, stderr = utils.execute(*cmd, process_input='')
+            except ProcessExecutionError:
                 return port
         raise Exception(_('Unable to find an open port'))
@@ -531,11 +534,11 @@ class LibvirtConnection(object):
         if not os.path.exists(base):
             fn(target=base, *args, **kwargs)
         if cow:
-            utils.execute('qemu-img create -f qcow2 -o '
-                          'cluster_size=2M,backing_file=%s %s'
-                          % (base, target))
+            utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
+                          'cluster_size=2M,backing_file=%s' % base,
+                          target)
         else:
-            utils.execute('cp %s %s' % (base, target))
+            utils.execute('cp', base, target)
 
     def _fetch_image(self, target, image_id, user, project, size=None):
         """Grab image and optionally attempt to resize it"""
@@ -545,7 +548,7 @@ class LibvirtConnection(object):
 
     def _create_local(self, target, local_gb):
         """Create a blank image of specified size"""
-        utils.execute('truncate %s -s %dG' % (target, local_gb))
+        utils.execute('truncate', target, '-s', "%dG" % local_gb)
         # TODO(vish): should 
we format disk by default? def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None): @@ -556,7 +559,7 @@ class LibvirtConnection(object): fname + suffix) # ensure directories exist and are writable - utils.execute('mkdir -p %s' % basepath(suffix='')) + utils.execute('mkdir', '-p', basepath(suffix='')) LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') @@ -656,7 +659,7 @@ class LibvirtConnection(object): ' data into image %(img_id)s (%(e)s)') % locals()) if FLAGS.libvirt_type == 'uml': - utils.execute('sudo chown root %s' % basepath('disk')) + utils.execute('sudo', 'chown', 'root', basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index e1ae03e70..ba12d4d3a 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -290,6 +290,9 @@ class SessionBase(object): #Always return 12GB available return 12 * 1024 * 1024 * 1024 + def host_call_plugin(*args): + return 'herp' + def xenapi_request(self, methodname, params): if methodname.startswith('login'): self._login(methodname, params) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a6de3c9aa..4e6c71446 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -253,16 +253,31 @@ class VMHelper(HelperBase): return vdi_ref @classmethod + def get_vdi_for_vm_safely(cls, session, vm_ref): + vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) + if vdi_refs is None: + raise Exception(_("No VDIs found for VM %s") % vm_ref) + else: + num_vdis = len(vdi_refs) + if num_vdis != 1: + raise Exception( + _("Unexpected number of VDIs (%(num_vdis)s) found" + " for VM %(vm_ref)s") % locals()) + + vdi_ref = vdi_refs[0] + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + return vdi_ref, vdi_rec + + @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): - """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, - Snapshot VHD - """ + """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, + Snapshot VHD""" #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...") % locals()) - vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] sr_ref = vm_vdi_rec["SR"] @@ -270,7 +285,8 @@ class VMHelper(HelperBase): task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) template_vm_ref = session.wait_for_task(task, instance_id) - template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] + template_vdi_rec = cls.get_vdi_for_vm_safely(session, + template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] LOG.debug(_('Created snapshot %(template_vm_ref)s from' @@ -285,6 +301,24 @@ class VMHelper(HelperBase): return template_vm_ref, template_vdi_uuids @classmethod + def get_sr(cls, session, sr_label='slices'): + """Finds the SR named by the given name label and returns + the UUID""" + return session.call_xenapi('SR.get_by_name_label', sr_label)[0] + + @classmethod + def get_sr_path(cls, session): + """Return the path to our storage repository + + This is used when we're dealing with VHDs directly, either by taking + snapshots or by restoring an image in the DISK_VHD format. 
+ """ + sr_ref = safe_find_sr(session) + sr_rec = session.get_xenapi().SR.get_record(sr_ref) + sr_uuid = sr_rec["uuid"] + return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) + + @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. @@ -298,7 +332,7 @@ class VMHelper(HelperBase): 'image_id': image_id, 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port, - 'sr_path': get_sr_path(session)} + 'sr_path': cls.get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'upload_vhd', kwargs) @@ -341,13 +375,13 @@ class VMHelper(HelperBase): 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port, 'uuid_stack': uuid_stack, - 'sr_path': get_sr_path(session)} + 'sr_path': cls.get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) vdi_uuid = session.wait_for_task(task, instance_id) - scan_sr(session, instance_id, sr_ref) + cls.scan_sr(session, instance_id, sr_ref) # Set the name-label to ease debugging vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) @@ -433,19 +467,21 @@ class VMHelper(HelperBase): "%(image_id)s, instance %(instance_id)s") % locals()) def determine_from_glance(): - glance_type2nova_type = {'machine': ImageType.DISK, - 'raw': ImageType.DISK_RAW, - 'vhd': ImageType.DISK_VHD, - 'kernel': ImageType.KERNEL_RAMDISK, - 'ramdisk': ImageType.KERNEL_RAMDISK} + glance_disk_format2nova_type = { + 'ami': ImageType.DISK, + 'aki': ImageType.KERNEL_RAMDISK, + 'ari': ImageType.KERNEL_RAMDISK, + 'raw': ImageType.DISK_RAW, + 'vhd': ImageType.DISK_VHD} client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta = client.get_image_meta(instance.image_id) - type_ = meta['type'] + disk_format = meta['disk_format'] try: - return glance_type2nova_type[type_] + return glance_disk_format2nova_type[disk_format] except KeyError: raise exception.NotFound( - _("Unrecognized image type '%(type_)s'") % locals()) + _("Unrecognized disk_format '%(disk_format)s'") + % locals()) def determine_from_instance(): if instance.kernel_id: @@ -609,6 +645,21 @@ class VMHelper(HelperBase): except cls.XenAPI.Failure as e: return {"Unable to retrieve diagnostics": e} + @classmethod + def scan_sr(cls, session, instance_id=None, sr_ref=None): + """Scans the SR specified by sr_ref""" + if sr_ref: + LOG.debug(_("Re-scanning SR %s"), sr_ref) + task = session.call_xenapi('Async.SR.scan', sr_ref) + session.wait_for_task(task, instance_id) + + @classmethod + def scan_default_sr(cls, session): + """Looks for the system default SR and triggers a re-scan""" + #FIXME(sirp/mdietz): refactor scan_default_sr in there + sr_ref = cls.get_sr(session) + session.call_xenapi('SR.scan', sr_ref) + def get_rrd(host, uuid): """Return the VM RRD XML as a string""" @@ -651,12 +702,6 @@ def get_vhd_parent_uuid(session, vdi_ref): return None -def scan_sr(session, instance_id, sr_ref): - LOG.debug(_("Re-scanning SR %s"), sr_ref) - task = session.call_xenapi('Async.SR.scan', sr_ref) - session.wait_for_task(task, instance_id) - - def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): """ Spin until the parent VHD is coalesced into its parent VHD @@ -681,7 +726,7 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, " %(max_attempts)d), giving up...") % locals()) raise exception.Error(msg) - 
scan_sr(session, instance_id, sr_ref) + VMHelper.scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent" @@ -738,18 +783,6 @@ def find_sr(session): return None -def get_sr_path(session): - """Return the path to our storage repository - - This is used when we're dealing with VHDs directly, either by taking - snapshots or by restoring an image in the DISK_VHD format. - """ - sr_ref = safe_find_sr(session) - sr_rec = session.get_xenapi().SR.get_record(sr_ref) - sr_uuid = sr_rec["uuid"] - return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) - - def remap_vbd_dev(dev): """Return the appropriate location for a plugged-in VBD device @@ -884,14 +917,13 @@ def _write_partition(virtual_size, dev): LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d' ' to %(dest)s...') % locals()) - def execute(cmd, process_input=None, check_exit_code=True): - return utils.execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + def execute(*cmd, **kwargs): + return utils.execute(*cmd, **kwargs) - execute('parted --script %s mklabel msdos' % dest) - execute('parted --script %s mkpart primary %ds %ds' % - (dest, primary_first, primary_last)) + execute('parted', '--script', dest, 'mklabel', 'msdos') + execute('parted', '--script', dest, 'mkpart', 'primary', + '%ds' % primary_first, + '%ds' % primary_last) LOG.debug(_('Writing partition table %s done.'), dest) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 9ac83efb0..562ecd4d5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -22,6 +22,7 @@ Management class for VM-related functions (spawn, reboot, etc). 
import json
 import M2Crypto
 import os
+import pickle
 import subprocess
 import tempfile
 import uuid
@@ -61,13 +62,35 @@ class VMOps(object):
             vms.append(rec["name_label"])
         return vms
 
+    def _start(self, instance, vm_ref=None):
+        """Power on a VM instance"""
+        if not vm_ref:
+            vm_ref = VMHelper.lookup(self._session, instance.name)
+        if vm_ref is None:
+            raise exception.Error(_('Attempted to power on non-existent'
+                ' instance, bad instance id %s') % instance.id)
+        LOG.debug(_("Starting instance %s"), instance.name)
+        self._session.call_xenapi('VM.start', vm_ref, False, False)
+
+    def create_disk(self, instance):
+        user = AuthManager().get_user(instance.user_id)
+        project = AuthManager().get_project(instance.project_id)
+        disk_image_type = VMHelper.determine_disk_image_type(instance)
+        vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
+                instance.image_id, user, project, disk_image_type)
+        return vdi_uuid
+
     def spawn(self, instance):
+        vdi_uuid = self.create_disk(instance)
+        self._spawn_with_disk(instance, vdi_uuid=vdi_uuid)
+
+    def _spawn_with_disk(self, instance, vdi_uuid):
         """Create VM instance"""
         instance_name = instance.name
         vm = VMHelper.lookup(self._session, instance_name)
         if vm is not None:
             raise exception.Duplicate(_('Attempted to create'
-                    ' non-unique name %s') % instance_name)
+                                        ' non-unique name %s') % instance_name)
 
         #ensure enough free memory is available
         if not VMHelper.ensure_free_mem(self._session, instance):
@@ -81,16 +104,14 @@ class VMOps(object):
 
         user = AuthManager().get_user(instance.user_id)
         project = AuthManager().get_project(instance.project_id)
-        disk_image_type = VMHelper.determine_disk_image_type(instance)
-
-        vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
-            instance.image_id, user, project, disk_image_type)
+        kernel = ramdisk = pv_kernel = None
 
+        # Are we building from a pre-existing disk?
         vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
 
-        pv_kernel = False
+        disk_image_type = VMHelper.determine_disk_image_type(instance)
         if disk_image_type == ImageType.DISK_RAW:
-            #Have a look at the VDI and see if it has a PV kernel
+            # Have a look at the VDI and see if it has a PV kernel
             pv_kernel = VMHelper.lookup_image(self._session, instance.id,
                                               vdi_ref)
         elif disk_image_type == ImageType.DISK_VHD:
@@ -98,19 +119,18 @@ class VMOps(object):
             # configurable as Windows will use HVM.
             pv_kernel = True
 
-        kernel = None
         if instance.kernel_id:
             kernel = VMHelper.fetch_image(self._session, instance.id,
                 instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
 
-        ramdisk = None
         if instance.ramdisk_id:
             ramdisk = VMHelper.fetch_image(self._session, instance.id,
                 instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
 
         vm_ref = VMHelper.create_vm(self._session,
                                     instance, kernel, ramdisk, pv_kernel)
-        VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
+        VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+                            vdi_ref=vdi_ref, userdevice=0, bootable=True)
 
         # inject_network_info and create vifs
         networks = self.inject_network_info(instance)
@@ -172,35 +192,38 @@ class VMOps(object):
         """Refactored out the common code of many methods that receive either
         a vm name or a vm instance, and want a vm instance in return. 
""" - vm = None - try: - if instance_or_vm.startswith("OpaqueRef:"): - # Got passed an opaque ref; return it + # if instance_or_vm is a string it must be opaque ref or instance name + if isinstance(instance_or_vm, basestring): + obj = None + try: + # check for opaque ref + obj = self._session.get_xenapi().VM.get_record(instance_or_vm) return instance_or_vm - else: - # Must be the instance name + except self.XenAPI.Failure: + # wasn't an opaque ref, must be an instance name instance_name = instance_or_vm - except (AttributeError, KeyError): - # Note the the KeyError will only happen with fakes.py - # Not a string; must be an ID or a vm instance - if isinstance(instance_or_vm, (int, long)): - ctx = context.get_admin_context() - try: - instance_obj = db.instance_get(ctx, instance_or_vm) - instance_name = instance_obj.name - except exception.NotFound: - # The unit tests screw this up, as they use an integer for - # the vm name. I'd fix that up, but that's a matter for - # another bug report. So for now, just try with the passed - # value - instance_name = instance_or_vm - else: - instance_name = instance_or_vm.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: + + # if instance_or_vm is an int/long it must be instance id + elif isinstance(instance_or_vm, (int, long)): + ctx = context.get_admin_context() + try: + instance_obj = db.instance_get(ctx, instance_or_vm) + instance_name = instance_obj.name + except exception.NotFound: + # The unit tests screw this up, as they use an integer for + # the vm name. I'd fix that up, but that's a matter for + # another bug report. So for now, just try with the passed + # value + instance_name = instance_or_vm + + # otherwise instance_or_vm is an instance object + else: + instance_name = instance_or_vm.name + vm_ref = VMHelper.lookup(self._session, instance_name) + if vm_ref is None: raise exception.NotFound( _('Instance not present %s') % instance_name) - return vm + return vm_ref def _acquire_bootlock(self, vm): """Prevent an instance from booting""" @@ -217,7 +240,7 @@ class VMOps(object): "start") def snapshot(self, instance, image_id): - """ Create snapshot from a running VM instance + """Create snapshot from a running VM instance :param instance: instance to be snapshotted :param image_id: id of image to upload to @@ -238,7 +261,20 @@ class VMOps(object): that will bundle the VHDs together and then push the bundle into Glance. 
""" + template_vm_ref = None + try: + template_vm_ref, template_vdi_uuids = self._get_snapshot(instance) + # call plugin to ship snapshot off to glance + VMHelper.upload_image( + self._session, instance.id, template_vdi_uuids, image_id) + finally: + if template_vm_ref: + self._destroy(instance, template_vm_ref, + shutdown=False, destroy_kernel_ramdisk=False) + + logging.debug(_("Finished snapshot and upload for VM %s"), instance) + def _get_snapshot(self, instance): #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added @@ -249,20 +285,89 @@ class VMOps(object): try: template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) + return template_vm_ref, template_vdi_uuids except self.XenAPI.Failure, exc: logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s") % locals()) return + def migrate_disk_and_power_off(self, instance, dest): + """Copies a VHD from one host machine to another + + :param instance: the instance that owns the VHD in question + :param dest: the destination host machine + :param disk_type: values are 'primary' or 'cow' + """ + vm_ref = VMHelper.lookup(self._session, instance.name) + + # The primary VDI becomes the COW after the snapshot, and we can + # identify it via the VBD. The base copy is the parent_uuid returned + # from the snapshot creation + + base_copy_uuid = cow_uuid = None + template_vdi_uuids = template_vm_ref = None try: - # call plugin to ship snapshot off to glance - VMHelper.upload_image( - self._session, instance.id, template_vdi_uuids, image_id) + # transfer the base copy + template_vm_ref, template_vdi_uuids = self._get_snapshot(instance) + base_copy_uuid = template_vdi_uuids[1] + vdi_ref, vm_vdi_rec = \ + VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) + cow_uuid = vm_vdi_rec['uuid'] + + params = {'host': dest, + 'vdi_uuid': base_copy_uuid, + 'instance_id': instance.id, + 'sr_path': VMHelper.get_sr_path(self._session)} + + task = self._session.async_call_plugin('migration', 'transfer_vhd', + {'params': pickle.dumps(params)}) + self._session.wait_for_task(task, instance.id) + + # Now power down the instance and transfer the COW VHD + self._shutdown(instance, vm_ref, method='clean') + + params = {'host': dest, + 'vdi_uuid': cow_uuid, + 'instance_id': instance.id, + 'sr_path': VMHelper.get_sr_path(self._session), } + + task = self._session.async_call_plugin('migration', 'transfer_vhd', + {'params': pickle.dumps(params)}) + self._session.wait_for_task(task, instance.id) + finally: - self._destroy(instance, template_vm_ref, shutdown=False, - destroy_kernel_ramdisk=False) + if template_vm_ref: + self._destroy(instance, template_vm_ref, + shutdown=False, destroy_kernel_ramdisk=False) - logging.debug(_("Finished snapshot and upload for VM %s"), instance) + # TODO(mdietz): we could also consider renaming these to something + # sensible so we don't need to blindly pass around dictionaries + return {'base_copy': base_copy_uuid, 'cow': cow_uuid} + + def attach_disk(self, instance, base_copy_uuid, cow_uuid): + """Links the base copy VHD to the COW via the XAPI plugin""" + vm_ref = VMHelper.lookup(self._session, instance.name) + new_base_copy_uuid = str(uuid.uuid4()) + new_cow_uuid = str(uuid.uuid4()) + params = {'instance_id': instance.id, + 'old_base_copy_uuid': base_copy_uuid, + 'old_cow_uuid': cow_uuid, + 'new_base_copy_uuid': new_base_copy_uuid, + 'new_cow_uuid': new_cow_uuid, + 'sr_path': VMHelper.get_sr_path(self._session), } + + task = 
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index d578c04ec..b63a5f8c3 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -158,10 +158,20 @@ class XenAPIConnection(object):
        """Create VM instance"""
        self._vmops.spawn(instance)

+    def finish_resize(self, instance, disk_info):
+        """Completes a resize, turning on the migrated instance"""
+        vdi_uuid = self._vmops.attach_disk(instance, disk_info['base_copy'],
+                                           disk_info['cow'])
+        self._vmops._spawn_with_disk(instance, vdi_uuid)
+
    def snapshot(self, instance, image_id):
        """ Create snapshot from a running VM instance """
        self._vmops.snapshot(instance, image_id)

+    def resize(self, instance, flavor):
+        """Resize a VM instance"""
+        raise NotImplementedError()
+
    def reboot(self, instance):
        """Reboot VM instance"""
        self._vmops.reboot(instance)
@@ -188,6 +198,11 @@ class XenAPIConnection(object):
        """Unpause paused VM instance"""
        self._vmops.unpause(instance, callback)

+    def migrate_disk_and_power_off(self, instance, dest):
+        """Transfers the VHD of a running instance to another host, then
+        shuts down the instance and copies over the COW disk"""
+        return self._vmops.migrate_disk_and_power_off(instance, dest)
+
    def suspend(self, instance, callback):
        """suspend the specified instance"""
        self._vmops.suspend(instance, callback)
@@ -228,6 +243,10 @@ class XenAPIConnection(object):
        """Return link to instance's ajax console"""
        return self._vmops.get_ajax_console(instance)

+    def get_host_ip_addr(self):
+        xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+        return xs_url.netloc
+
    def attach_volume(self, instance_name, device_path, mountpoint):
        """Attach volume storage to VM instance"""
        return self._volumeops.attach_volume(instance_name,
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index e3744c790..45cc800e7 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -65,14 +65,14 @@ class VolumeDriver(object):
        self._execute = execute
        self._sync_exec = sync_exec

-    def _try_execute(self, command):
+    def _try_execute(self, *command):
        # NOTE(vish): Volume commands can partially fail due to timing, but
        #             running them a second time on failure will usually
        #             recover nicely.
        tries = 0
        while True:
            try:
-                self._execute(command)
+                self._execute(*command)
                return True
            except exception.ProcessExecutionError:
                tries = tries + 1
@@ -84,7 +84,7 @@ def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met"""
-        out, err = self._execute("sudo vgs --noheadings -o name")
+        out, err = self._execute('sudo', 'vgs', '--noheadings', '-o', 'name')
        volume_groups = out.split()
        if not FLAGS.volume_group in volume_groups:
            raise exception.Error(_("volume group %s doesn't exist")
@@ -97,22 +97,22 @@
            sizestr = '100M'
        else:
            sizestr = '%sG' % volume['size']
-        self._try_execute("sudo lvcreate -L %s -n %s %s" %
-                          (sizestr,
+        self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n',
                           volume['name'],
-                           FLAGS.volume_group))
+                          FLAGS.volume_group)

    def delete_volume(self, volume):
        """Deletes a logical volume."""
        try:
-            self._try_execute("sudo lvdisplay %s/%s" %
+            self._try_execute('sudo', 'lvdisplay',
+                              '%s/%s' %
                              (FLAGS.volume_group,
                               volume['name']))
        except Exception as e:
            # If the volume isn't present, then don't attempt to delete
            return True

-        self._try_execute("sudo lvremove -f %s/%s" %
+        self._try_execute('sudo', 'lvremove', '-f', "%s/%s" %
                          (FLAGS.volume_group,
                           volume['name']))
@@ -168,12 +168,13 @@ class AOEDriver(VolumeDriver):
         blade_id) = self.db.volume_allocate_shelf_and_blade(context,
                                                             volume['id'])
        self._try_execute(
-                "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
-                (shelf_id,
+                'sudo', 'vblade-persist', 'setup',
+                shelf_id,
                 blade_id,
                 FLAGS.aoe_eth_dev,
-                 FLAGS.volume_group,
-                 volume['name']))
+                "/dev/%s/%s" %
+                (FLAGS.volume_group,
+                 volume['name']))
        # NOTE(vish): The standard _try_execute does not work here
        #             because these methods throw errors if other
        #             volumes on this host are in the process of
        #             being created. The good news is the command
        #             still works for the other volumes, so we
        #             just wait a bit for the current volume to
        #             be ready and ignore any errors.
        time.sleep(2)
-        self._execute("sudo vblade-persist auto all",
+        self._execute('sudo', 'vblade-persist', 'auto', 'all',
                      check_exit_code=False)
-        self._execute("sudo vblade-persist start all",
+        self._execute('sudo', 'vblade-persist', 'start', 'all',
                      check_exit_code=False)

    def remove_export(self, context, volume):
@@ -192,15 +193,15 @@
        (shelf_id,
         blade_id) = self.db.volume_get_shelf_and_blade(context,
                                                        volume['id'])
-        self._try_execute("sudo vblade-persist stop %s %s" %
-                          (shelf_id, blade_id))
-        self._try_execute("sudo vblade-persist destroy %s %s" %
-                          (shelf_id, blade_id))
+        self._try_execute('sudo', 'vblade-persist', 'stop',
+                          shelf_id, blade_id)
+        self._try_execute('sudo', 'vblade-persist', 'destroy',
+                          shelf_id, blade_id)

    def discover_volume(self, _volume):
        """Discover volume on a remote host."""
-        self._execute("sudo aoe-discover")
-        self._execute("sudo aoe-stat", check_exit_code=False)
+        self._execute('sudo', 'aoe-discover')
+        self._execute('sudo', 'aoe-stat', check_exit_code=False)

    def undiscover_volume(self, _volume):
        """Undiscover volume on a remote host."""
@@ -252,13 +253,16 @@ class ISCSIDriver(VolumeDriver):
        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
-        self._sync_exec("sudo ietadm --op new "
-                        "--tid=%s --params Name=%s" %
-                        (iscsi_target, iscsi_name),
+        self._sync_exec('sudo', 'ietadm', '--op', 'new',
+                        "--tid=%s" % iscsi_target,
+                        '--params',
+                        "Name=%s" % iscsi_name,
                        check_exit_code=False)
-        self._sync_exec("sudo ietadm --op new --tid=%s "
-                        "--lun=0 --params Path=%s,Type=fileio" %
-                        (iscsi_target, volume_path),
+        self._sync_exec('sudo', 'ietadm', '--op', 'new',
+                        "--tid=%s" % iscsi_target,
+                        '--lun=0',
+                        '--params',
+                        "Path=%s,Type=fileio" % volume_path,
                        check_exit_code=False)

    def _ensure_iscsi_targets(self, context, host):
@@ -279,12 +283,13 @@
                                          volume['host'])
        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
-        self._execute("sudo ietadm --op new "
-                      "--tid=%s --params Name=%s" %
-                      (iscsi_target, iscsi_name))
+        self._execute('sudo', 'ietadm', '--op', 'new',
+                      '--tid=%s' % iscsi_target,
+                      '--params', 'Name=%s' % iscsi_name)
-        self._execute("sudo ietadm --op new --tid=%s "
-                      "--lun=0 --params Path=%s,Type=fileio" %
-                      (iscsi_target, volume_path))
+        self._execute('sudo', 'ietadm', '--op', 'new',
+                      '--tid=%s' % iscsi_target,
+                      '--lun=0', '--params',
+                      'Path=%s,Type=fileio' % volume_path)

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
@@ -299,16 +304,18 @@
        try:
            # ietadm show will exit with an error if
            # this export has already been removed
-            self._execute("sudo ietadm --op show --tid=%s " % iscsi_target)
+            self._execute('sudo', 'ietadm', '--op', 'show',
+                          '--tid=%s' % iscsi_target)
        except Exception as e:
            LOG.info(_("Skipping remove_export. No iscsi_target "
                       "is presently exported for volume: %d"), volume['id'])
            return
-        self._execute("sudo ietadm --op delete --tid=%s "
-                      "--lun=0" % iscsi_target)
-        self._execute("sudo ietadm --op delete --tid=%s" %
-                      iscsi_target)
+        self._execute('sudo', 'ietadm', '--op', 'delete',
+                      '--tid=%s' % iscsi_target,
+                      '--lun=0')
+        self._execute('sudo', 'ietadm', '--op', 'delete',
+                      '--tid=%s' % iscsi_target)

    def _do_iscsi_discovery(self, volume):
        #TODO(justinsb): Deprecate discovery and use stored info
@@ -317,8 +324,8 @@
        volume_name = volume['name']

-        (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
-                                    "sendtargets -p %s" % (volume['host']))
+        (out, _err) = self._execute('sudo', 'iscsiadm', '-m', 'discovery',
+                                    '-t', 'sendtargets', '-p', volume['host'])
        for target in out.splitlines():
            if FLAGS.iscsi_ip_prefix in target and volume_name in target:
                return target
@@ -478,7 +485,7 @@ class RBDDriver(VolumeDriver):

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met"""
-        (stdout, stderr) = self._execute("rados lspools")
+        (stdout, stderr) = self._execute('rados', 'lspools')
        pools = stdout.split("\n")
        if not FLAGS.rbd_pool in pools:
            raise exception.Error(_("rbd has no pool %s") %
@@ -490,16 +497,13 @@
            size = 100
        else:
            size = int(volume['size']) * 1024
-        self._try_execute("rbd --pool %s --size %d create %s" %
-                          (FLAGS.rbd_pool,
-                           size,
-                           volume['name']))
+        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+                          '--size', size, 'create', volume['name'])

    def delete_volume(self, volume):
        """Deletes a logical volume."""
-        self._try_execute("rbd --pool %s rm %s" %
-                          (FLAGS.rbd_pool,
-                           volume['name']))
+        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+                          'rm', volume['name'])

    def local_path(self, volume):
        """Returns the path of the rbd volume."""
@@ -534,7 +538,7 @@ class SheepdogDriver(VolumeDriver):

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met"""
        try:
-            (out, err) = self._execute("collie cluster info")
+            (out, err) = self._execute('collie', 'cluster', 'info')
            if not out.startswith('running'):
                raise exception.Error(_("Sheepdog is not working: %s") % out)
        except exception.ProcessExecutionError:
@@ -546,12 +550,13 @@
            sizestr = '100M'
        else:
            sizestr = '%sG' % volume['size']
-        self._try_execute("qemu-img create sheepdog:%s %s" %
-                          (volume['name'], sizestr))
+        self._try_execute('qemu-img', 'create',
+                          "sheepdog:%s" % volume['name'],
+                          sizestr)

    def delete_volume(self, volume):
        """Deletes a logical volume"""
-        self._try_execute("collie vdi delete %s" % volume['name'])
+        self._try_execute('collie', 'vdi', 'delete', volume['name'])

    def local_path(self, volume):
        return "sheepdog:%s" % volume['name']
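The pattern used throughout this file -- passing each command argument separately rather than one shell string -- means _execute() can run the command without a shell, so values containing spaces or shell metacharacters are passed through as single argv entries instead of being re-tokenized. A minimal standalone sketch of the idea (the real helper lives in nova.utils and adds error handling and logging; this version is only an illustration):

    import subprocess

    def execute(*cmd):
        # Each argument becomes its own argv element; no shell is involved,
        # so a value like "vol; rm -rf /" stays one harmless argument.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return out, err

    # e.g. execute('sudo', 'lvdisplay', 'nova-volumes/vol-00000001')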
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 1eb66d067..2d18da8fb 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -36,6 +36,7 @@ import webob.exc

from paste import deploy

+from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
@@ -82,6 +83,35 @@ class Server(object):
                    log=WritableLogger(logger))


+class Request(webob.Request):
+
+    def best_match_content_type(self):
+        """
+        Determine the most acceptable content-type based on the
+        query extension then the Accept header
+        """
+        parts = self.path.rsplit(".", 1)
+
+        if len(parts) > 1:
+            format = parts[1]
+            if format in ["json", "xml"]:
+                return "application/{0}".format(parts[1])
+
+        ctypes = ["application/json", "application/xml"]
+        bm = self.accept.best_match(ctypes)
+
+        return bm or "application/json"
+
+    def get_content_type(self):
+        try:
+            ct = self.headers["Content-Type"]
+            assert ct in ("application/xml", "application/json")
+            return ct
+        except Exception:
+            raise webob.exc.HTTPBadRequest("Invalid content type")
+
+
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""
@@ -113,7 +143,7 @@ class Application(object):
    def __call__(self, environ, start_response):
        r"""Subclasses will probably want to implement __call__ like this:

-        @webob.dec.wsgify
+        @webob.dec.wsgify(RequestClass=Request)
        def __call__(self, req):
          # Any of the following objects work as responses:
@@ -199,7 +229,7 @@ class Middleware(Application):
        """Do whatever you'd like to the response."""
        return response

-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        response = self.process_request(req)
        if response:
@@ -212,7 +242,7 @@ class Debug(Middleware):
    """Helper class that can be inserted into any WSGI application chain
    to get information about the request and response."""

-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        print ("*" * 40) + " REQUEST ENVIRON"
        for key, value in req.environ.items():
@@ -276,7 +306,7 @@ class Router(object):
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """
        Route the incoming request to a controller based on self.map.
@@ -285,7 +315,7 @@
        return self._router

    @staticmethod
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """
        Called by self._router after matching the incoming request to a route
@@ -304,11 +334,11 @@ class Controller(object):
    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon itself. All action methods
    must, in addition to their normal parameters, accept a 'req' argument
-    which is the incoming webob.Request. They raise a webob.exc exception,
+    which is the incoming wsgi.Request. They raise a webob.exc exception,
    or return a dict which will be serialized by requested content type.
    """

-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """
        Call the method specified in req.environ by RoutesMiddleware.
@@ -318,32 +348,45 @@
        method = getattr(self, action)
        del arg_dict['controller']
        del arg_dict['action']
+        if 'format' in arg_dict:
+            del arg_dict['format']
        arg_dict['req'] = req
        result = method(**arg_dict)
+
        if type(result) is dict:
-            return self._serialize(result, req)
+            content_type = req.best_match_content_type()
+            body = self._serialize(result, content_type)
+
+            response = webob.Response()
+            response.headers["Content-Type"] = content_type
+            response.body = body
+            return response
+
        else:
            return result

-    def _serialize(self, data, request):
+    def _serialize(self, data, content_type):
        """
-        Serialize the given dict to the response type requested in request.
+        Serialize the given dict to the provided content_type.
        Uses self._serialization_metadata if it exists, which is a dict mapping
        MIME types to information needed to serialize to that type.
""" _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = Serializer(request.environ, _metadata) - return serializer.to_content_type(data) + serializer = Serializer(_metadata) + try: + return serializer.serialize(data, content_type) + except exception.InvalidContentType: + raise webob.exc.HTTPNotAcceptable() - def _deserialize(self, data, request): + def _deserialize(self, data, content_type): """ - Deserialize the request body to the response type requested in request. + Deserialize the request body to the specefied content type. Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = Serializer(request.environ, _metadata) - return serializer.deserialize(data) + serializer = Serializer(_metadata) + return serializer.deserialize(data, content_type) class Serializer(object): @@ -351,50 +394,52 @@ class Serializer(object): Serializes and deserializes dictionaries to certain MIME types. """ - def __init__(self, environ, metadata=None): + def __init__(self, metadata=None): """ Create a serializer based on the given WSGI environment. 'metadata' is an optional dict mapping MIME types to information needed to serialize a dictionary to that type. """ self.metadata = metadata or {} - req = webob.Request.blank('', environ) - suffix = req.path_info.split('.')[-1].lower() - if suffix == 'json': - self.handler = self._to_json - elif suffix == 'xml': - self.handler = self._to_xml - elif 'application/json' in req.accept: - self.handler = self._to_json - elif 'application/xml' in req.accept: - self.handler = self._to_xml - else: - # This is the default - self.handler = self._to_json - def to_content_type(self, data): - """ - Serialize a dictionary into a string. + def _get_serialize_handler(self, content_type): + handlers = { + "application/json": self._to_json, + "application/xml": self._to_xml, + } - The format of the string will be decided based on the Content Type - requested in self.environ: by Accept: header, or by URL suffix. + try: + return handlers[content_type] + except Exception: + raise exception.InvalidContentType() + + def serialize(self, data, content_type): + """ + Serialize a dictionary into a string of the specified content type. """ - return self.handler(data) + return self._get_serialize_handler(content_type)(data) - def deserialize(self, datastring): + def deserialize(self, datastring, content_type): """ Deserialize a string to a dictionary. The string must be in the format of a supported MIME type. 
""" - datastring = datastring.strip() + return self.get_deserialize_handler(content_type)(datastring) + + def get_deserialize_handler(self, content_type): + handlers = { + "application/json": self._from_json, + "application/xml": self._from_xml, + } + try: - is_xml = (datastring[0] == '<') - if not is_xml: - return utils.loads(datastring) - return self._from_xml(datastring) - except: - return None + return handlers[content_type] + except Exception: + raise exception.InvalidContentType() + + def _from_json(self, datastring): + return utils.loads(datastring) def _from_xml(self, datastring): xmldata = self.metadata.get('application/xml', {}) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index d60816ce7..d2b2d61e6 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -30,13 +30,14 @@ import simplejson as json def main(dom_id, command, only_this_vif=None): - xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \ - % dom_id, True) + xsls = execute('/usr/bin/xenstore-ls', + '/local/domain/%s/vm-data/networking' % dom_id, True) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s" - xsread = execute(xsr % (dom_id, mac), True) + xsread = execute('/usr/bin/enstore-read', + '/local/domain/%s/vm-data/networking/%s' % + (dom_id, mac), True) data = json.loads(xsread) for ip in data['ips']: if data["label"] == "public": @@ -51,9 +52,9 @@ def main(dom_id, command, only_this_vif=None): apply_iptables_rules(command, params) -def execute(command, return_stdout=False): +def execute(*command, return_stdout=False): devnull = open(os.devnull, 'w') - proc = subprocess.Popen(command, shell=True, close_fds=True, + proc = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE, stderr=devnull) devnull.close() if return_stdout: @@ -67,45 +68,69 @@ def execute(command, return_stdout=False): def apply_iptables_rules(command, params): - iptables = lambda rule: execute("/sbin/iptables %s" % rule) + iptables = lambda *rule: execute('/sbin/iptables', *rule) - iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \ - -j ACCEPT" % params) + iptables('-D', 'FORWARD', '-m', 'physdev', + '--physdev-in', '%(VIF)s' % params, + '-s', '%(IP)s' % params, + '-j', 'ACCEPT') if command == 'online': - iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \ - -j ACCEPT" % params) + iptables('-A', 'FORWARD', '-m', 'physdev', + '--physdev-in', '%(VIF)s' % params, + '-s', '%(IP)s' % params, + '-j', 'ACCEPT') def apply_arptables_rules(command, params): - arptables = lambda rule: execute("/sbin/arptables %s" % rule) - - arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \ - --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params) - arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \ - --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params) + arptables = lambda *rule: execute('/sbin/arptables', *rule) + + arptables('-D', 'FORWARD', '--opcode', 'Request', + '--in-interface', '%(VIF)s' % params, + '--source-ip', '%(IP)s' % params, + '--source-mac', '%(MAC)s' % params, + '-j', 'ACCEPT') + arptables('-D', 'FORWARD', '--opcode', 'Reply', + '--in-interface', '%(VIF)s' % params, + '--source-ip', '%(IP)s' % params, + '--source-mac', '%(MAC)s' % params, + '-j', 'ACCEPT') if command == 
    if command == 'online':
-        arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \
-                  --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
-        arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \
-                  --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+        arptables('-A', 'FORWARD', '--opcode', 'Request',
+                  '--in-interface', '%(VIF)s' % params,
+                  '--source-ip', '%(IP)s' % params,
+                  '--source-mac', '%(MAC)s' % params,
+                  '-j', 'ACCEPT')
+        arptables('-A', 'FORWARD', '--opcode', 'Reply',
+                  '--in-interface', '%(VIF)s' % params,
+                  '--source-ip', '%(IP)s' % params,
+                  '--source-mac', '%(MAC)s' % params,
+                  '-j', 'ACCEPT')


def apply_ebtables_rules(command, params):
-    ebtables = lambda rule: execute("/sbin/ebtables %s" % rule)
-
-    ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" %
-             params)
-    ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" %
-             params)
+    ebtables = lambda *rule: execute('/sbin/ebtables', *rule)
+
+    ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'],
+             '--arp-ip-dst', params['IP'],
+             '-j', 'ACCEPT')
+    ebtables('-D', 'FORWARD', '-p', '0800', '-o',
+             params['VIF'], '--ip-dst', params['IP'],
+             '-j', 'ACCEPT')
    if command == 'online':
-        ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \
-                 -j ACCEPT" % params)
-        ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \
-                 -j ACCEPT" % params)
-
-    ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+        ebtables('-A', 'FORWARD', '-p', '0806',
+                 '-o', params['VIF'],
+                 '--arp-ip-dst', params['IP'],
+                 '-j', 'ACCEPT')
+        ebtables('-A', 'FORWARD', '-p', '0800',
+                 '-o', params['VIF'],
+                 '--ip-dst', params['IP'],
+                 '-j', 'ACCEPT')
+
+    ebtables('-D', 'FORWARD', '-s', '!', params['MAC'],
+             '-i', params['VIF'], '-j', 'DROP')
    if command == 'online':
-        ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+        ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'],
+                 '-i', params['VIF'], '-j', 'DROP')


if __name__ == "__main__":
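With execute() now taking the command as separate arguments and return_stdout as a keyword, a call site reads like this (the dom_id value 5 is just an example):

    # Hypothetical invocation of the refactored helper:
    xsls = execute('/usr/bin/xenstore-ls',
                   '/local/domain/5/vm-data/networking',
                   return_stdout=True)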
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index aa12d432a..201b99fda 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -201,13 +201,21 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port):
    # to request
    conn.putrequest('PUT', '/images/%s' % image_id)

-    # TODO(sirp): make `store` configurable
+    # NOTE(sirp): There is some confusion around OVF. Here's a summary of
+    # where we currently stand:
+    #   1. OVF as a container format is misnamed. We really should be using
+    #      OVA since that is the name for the container format; OVF is the
+    #      standard applied to the manifest file contained within.
+    #   2. We're currently uploading a vanilla tarball. In order to be
+    #      OVF/OVA compliant, we'll need to embed a minimal OVF manifest
+    #      as the first file.
    headers = {
        'content-type': 'application/octet-stream',
        'transfer-encoding': 'chunked',
-        'x-image-meta-is_public': 'True',
+        'x-image-meta-is-public': 'True',
        'x-image-meta-status': 'queued',
-        'x-image-meta-type': 'vhd'}
+        'x-image-meta-disk-format': 'vhd',
+        'x-image-meta-container-format': 'ovf'}
    for header, value in headers.iteritems():
        conn.putheader(header, value)
    conn.endheaders()
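Because the upload announces 'transfer-encoding: chunked', httplib does not frame the body automatically; each chunk must be written as a hex length, CRLF, the data, and a trailing CRLF, with a zero-length chunk terminating the stream. A sketch of what the sending loop looks like (send_chunked is a hypothetical helper, not part of this plugin):

    def send_chunked(conn, fileobj, chunk_size=65536):
        # Manual HTTP/1.1 chunked framing over an httplib connection.
        while True:
            chunk = fileobj.read(chunk_size)
            if not chunk:
                break
            conn.send("%x\r\n%s\r\n" % (len(chunk), chunk))
        conn.send("0\r\n\r\n")  # zero-length chunk ends the body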
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
new file mode 100644
index 000000000..4aa89863a
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+XenAPI Plugin for transferring data between host nodes
+"""
+
+import os
+import os.path
+import pickle
+import shlex
+import shutil
+import subprocess
+
+import XenAPIPlugin
+
+from pluginlib_nova import *
+configure_logging('migration')
+
+
+def move_vhds_into_sr(session, args):
+    """Moves the VHDs from their copied location to the SR"""
+    params = pickle.loads(exists(args, 'params'))
+    instance_id = params['instance_id']
+
+    old_base_copy_uuid = params['old_base_copy_uuid']
+    old_cow_uuid = params['old_cow_uuid']
+
+    new_base_copy_uuid = params['new_base_copy_uuid']
+    new_cow_uuid = params['new_cow_uuid']
+
+    sr_path = params['sr_path']
+    sr_temp_path = "%s/images/" % sr_path
+
+    # Discover the copied VHDs locally, and then set up paths to copy
+    # them to under the SR
+    source_image_path = "%s/instance%d" % ('/images/', instance_id)
+    source_base_copy_path = "%s/%s.vhd" % (source_image_path,
+                                           old_base_copy_uuid)
+    source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid)
+
+    temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id)
+    new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid)
+    new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid)
+
+    logging.debug('Creating temporary SR path %s' % temp_vhd_path)
+    os.makedirs(temp_vhd_path)
+
+    logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path))
+    shutil.move(source_base_copy_path, new_base_copy_path)
+
+    logging.debug('Moving %s into %s' % (source_cow_path, temp_vhd_path))
+    shutil.move(source_cow_path, new_cow_path)
+
+    logging.debug('Cleaning up %s' % source_image_path)
+    os.rmdir(source_image_path)
+
+    # Link the COW to the base copy
+    logging.debug('Attaching COW to the base copy %s -> %s' %
+                  (new_cow_path, new_base_copy_path))
+    subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' %
+                                (new_cow_path, new_base_copy_path)))
+
+    logging.debug('Moving VHDs into SR %s' % sr_path)
+    shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path)
+    shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path)
+
+    logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path)
+    os.rmdir(temp_vhd_path)
+    return ""
+
+
+def transfer_vhd(session, args):
+    """Rsyncs a VHD to an adjacent host"""
+    params = pickle.loads(exists(args, 'params'))
+    instance_id = params['instance_id']
+    host = params['host']
+    vdi_uuid = params['vdi_uuid']
+    sr_path = params['sr_path']
+    vhd_path = "%s.vhd" % vdi_uuid
+
+    source_path = "%s/%s" % (sr_path, vhd_path)
+    dest_path = '%s:%sinstance%d/' % (host, '/images/', instance_id)
+
+    logging.debug("Preparing to transmit %s to %s" % (source_path,
+                                                      dest_path))
+
+    ssh_cmd = 'ssh -o StrictHostKeyChecking=no'
+
+    rsync_args = shlex.split("nohup /usr/bin/rsync -av --progress -e '%s' %s %s"
+                             % (ssh_cmd, source_path, dest_path))
+
+    logging.debug('rsync %s' % (' '.join(rsync_args)))
+
+    rsync_proc = subprocess.Popen(rsync_args, stdout=subprocess.PIPE)
+    logging.debug('Rsync output: \n %s' % rsync_proc.communicate()[0])
+    logging.debug('Rsync return: %d' % rsync_proc.returncode)
+    if rsync_proc.returncode != 0:
+        raise Exception("Unexpected VHD transfer failure")
+    return ""
+
+
+if __name__ == '__main__':
+    XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd,
+                           'move_vhds_into_sr': move_vhds_into_sr, })
diff --git a/tools/euca-get-ajax-console b/tools/euca-get-ajax-console
index 37060e74f..e407dd566 100755
--- a/tools/euca-get-ajax-console
+++ b/tools/euca-get-ajax-console
@@ -35,7 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):

import boto
import nova
from boto.ec2.connection import EC2Connection
-from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed
+from euca2ools import Euca2ool, InstanceValidationError, Util

usage_string = """
Retrieves a url to an ajax console terminal
@@ -147,7 +147,7 @@ def main():

    try:
        euca_conn = euca.make_connection()
-    except ConnectionFailed, e:
+    except Exception, e:
        print e.message
        sys.exit(1)
    try:
