From 2c0f1d78927c14f1d155e617a066b09a00acb100 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 18 Jan 2011 17:40:36 -0600 Subject: Basic stubbing throughout the stack --- nova/api/openstack/servers.py | 37 ++++++++++++++++++++++++++++++++++++- nova/compute/api.py | 14 ++++++++++++++ nova/compute/manager.py | 13 +++++++++++++ nova/virt/fake.py | 12 ++++++++++++ nova/virt/xenapi/vmops.py | 4 ++++ nova/virt/xenapi_conn.py | 4 ++++ 6 files changed, 83 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8cbcebed2..06104b37e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -182,9 +182,44 @@ class Controller(wsgi.Controller): return exc.HTTPNoContent() def action(self, req, id): - """ Multi-purpose method used to reboot, rebuild, and + """ Multi-purpose method used to reboot, rebuild, or resize a server """ + + actions = { + 'reboot': self._action_reboot, + 'resize': self._action_resize, + 'confirmResize': self._action_confirm_resize, + 'revertResize': self._action_revert_resize, + 'rebuild': self._action_rebuild + } + input_dict = self._deserialize(req.body, req) + for key in actions.keys(): + if key in input_dict: + return actions[key](input_dict, req, id) + return faults.Fault(exc.HTTPNotImplemented()) + + def _action_confirm_resize(self, input_dict, req, id): + return faults.Fault(exc.HTTPNotImplemented()) + + def _action_revert_resize(self, input_dict, req, id): + return faults.Fault(exc.HTTPNotImplemented()) + + def _action_rebuild(self, input_dict, req, id): + return faults.Fault(exc.HTTPNotImplemented()) + + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + resize_flavor = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + resize_flavor) + except: + return faults.Fault(exc.HTTPUnprocessableEntity()) + return faults.Fault(exc.HTTPAccepted()) + + + def _action_reboot(self, input_dict, req, id): #TODO(sandy): rebuild/resize not supported.
try: reboot_type = input_dict['reboot']['type'] diff --git a/nova/compute/api.py b/nova/compute/api.py index cc85ec691..22d5964c6 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -361,6 +361,20 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) + def revert_resize(self, context, instance_id): + """Reverts a resize, deleting the 'new' instance in the process""" + raise NotImplemented() + + def confirm_resize(self, context, instance_id): + """Confirms a migration/resize, deleting the 'old' instance in the + process.""" + raise NotImplemented() + + def resize(self, context, instance_id, flavor_id): + """Resize a running instance.""" + self._cast_compute_message('resize_instance', context, instance_id, + flavor_id) + def pause(self, context, instance_id): """Pause the given instance.""" self._cast_compute_message('pause_instance', context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 613ee45f6..714cec209 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -376,6 +376,19 @@ class ComputeManager(manager.Manager): """Update instance state when async task completes.""" self._update_state(context, instance_id) + @exception.wrap_exception + @checks_instance_lock + def resize_instance(self, context, instance_id, flavor_id): + """Moves a running instance to another host, possibly changing the RAM + and disk size in the process""" + context = context.elevated() + instance_ref = self.db.instance_get(context. instance_id) + LOG.audit(_('instance %s: migrating'), instance_id, context=context) + self.db.instance_set_state(context, instance_id, power_state.RUNNING, + 'migrating') + self.driver.resize(instance_ref, flavor_id) + self._update_state(context, instance_id) + @exception.wrap_exception @checks_instance_lock def pause_instance(self, context, instance_id): diff --git a/nova/virt/fake.py b/nova/virt/fake.py index a57a8f43b..a6fe24c3e 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -138,6 +138,18 @@ class FakeConnection(object): """ pass + def resize(self, instance, flavor): + """ + Resizes/Migrates the specified instance. + + The flavor parameter determines whether or not the instance RAM and + disk space are modified, and if so, to what size. + + The work will be done asynchronously. This function returns a task + that allows the caller to detect when it is complete. + """ + pass + def set_admin_password(self, instance, new_pass): """ Set the root password on the specified instance. 
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6e359ef82..50ee24325 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -207,6 +207,10 @@ class VMOps(object): logging.debug(_("Finished snapshot and upload for VM %s"), instance) + def resize(self, instance, flavor): + """Resize a running instance by changing it's RAM and disk size """ + raise NotImplementedError() + def reboot(self, instance): """Reboot VM instance""" vm = self._get_vm_opaque_ref(instance) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 689844f34..c850dbf80 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -145,6 +145,10 @@ class XenAPIConnection(object): """ Create snapshot from a running VM instance """ self._vmops.snapshot(instance, name) + def resize(self, instance, flavor): + """Resize a VM instance""" + raise NotImplementedError() + def reboot(self, instance): """Reboot VM instance""" self._vmops.reboot(instance) -- cgit From 6d2e2c52012abac8cab322357ce0ffd0ffc2fbaf Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 2 Feb 2011 10:49:02 -0600 Subject: Casting to the scheduler --- nova/compute/api.py | 10 +++++++--- nova/rpc.py | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 0faa066b5..283845709 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -404,10 +404,14 @@ class API(base.Base): process.""" raise NotImplemented() - def resize(self, context, instance_id, flavor_id): + def resize(self, context, instance_id, flavor): """Resize a running instance.""" - self._cast_compute_message('resize_instance', context, instance_id, - flavor_id) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "resize_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id, + "flavor": flavor}}) def pause(self, context, instance_id): """Pause the given instance.""" diff --git a/nova/rpc.py b/nova/rpc.py index 01fc6d44b..c4c938f4d 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -119,7 +119,7 @@ class Consumer(messaging.Consumer): LOG.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't - # exceptions to be logged 10 times a second if some + # want exceptions to be logged 10 times a second if some # persistent failure occurs. except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: -- cgit From b65e994d9597f0a989b30eafc7a51bc34c4c361f Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 4 Feb 2011 15:13:18 -0600 Subject: Added a bunch of stubbed out functionality --- nova/compute/api.py | 22 +++++--- nova/compute/manager.py | 65 +++++++++++++++++++--- nova/db/api.py | 22 +++++++- nova/db/sqlalchemy/api.py | 53 ++++++++++++++++++ nova/db/sqlalchemy/models.py | 12 +++- nova/virt/xenapi/vmops.py | 18 +++++- nova/virt/xenapi_conn.py | 11 ++++ plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 4 ++ 8 files changed, 187 insertions(+), 20 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 283845709..f0d5ff2cb 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -379,6 +379,10 @@ class API(base.Base): kwargs = {'method': method, 'args': params} return rpc.call(context, queue, kwargs) + def _cast_scheduler_message(self, context, args) + """Generic handler for RPC calls to the scheduler""" + rpc.cast(context, FLAGS.scheduler_topic, args) + def snapshot(self, context, instance_id, name): """Snapshot the given instance. 
@@ -397,21 +401,23 @@ class API(base.Base): def revert_resize(self, context, instance_id): """Reverts a resize, deleting the 'new' instance in the process""" - raise NotImplemented() + instance_ref = self.db.instance_get(context, instance_id) + self._cast_compute_message('revert_resize', context, instance_id, + instance_ref['host']) def confirm_resize(self, context, instance_id): """Confirms a migration/resize, deleting the 'old' instance in the process.""" - raise NotImplemented() + migration_ref = self.db.migration_get_by_instance_id(context, instance_id) + self._cast_compute_message('confirm_resize', context, instance_id, + migration_ref['source_host']) def resize(self, context, instance_id, flavor): """Resize a running instance.""" - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "resize_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id, - "flavor": flavor}}) + self._cast_scheduler_message(context, + {"method": "prep_resize", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id, }},) def pause(self, context, instance_id): """Pause the given instance.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 41ef23980..140db0d3e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -380,18 +380,67 @@ class ComputeManager(manager.Manager): """Update instance state when async task completes.""" self._update_state(context, instance_id) + @exception.wrap_exception @checks_instance_lock - def resize_instance(self, context, instance_id, flavor_id): - """Moves a running instance to another host, possibly changing the RAM - and disk size in the process""" + def prep_resize(self, context, instance_id): + """Initiates the process of moving a running instance to another + host, possibly changing the RAM and disk size in the process""" context = context.elevated() instance_ref = self.db.instance_get(context.
instance_id) - LOG.audit(_('instance %s: migrating'), instance_id, context=context) - self.db.instance_set_state(context, instance_id, power_state.RUNNING, - 'migrating') - self.driver.resize(instance_ref, flavor_id) - self._update_state(context, instance_id) + migration_ref = self.db.migration_create(context, + { 'instance_id': instance_id, + 'source_host': instance_ref['host'], + 'dest_host': socket.gethostbyname(socket.gethostname()), + 'status': 'pre-migrating' } + LOG.audit(_('instance %s: migrating to '), instance_id, context=context) + service = self.db.service_get_by_host_and_topic(context, + instance_ref['host'], topic) + topic = self.db.queue_get_for(context, topic, service['id']) + rpc.cast(context, topic, + { 'method': 'resize_instance', + 'migration_id': migration_ref['id'], } + + @exception.wrap_exception + @checks_instance_lock + def resize_instance(self, context, migration_id): + """Starts the migration of a running instance to another host""" + migration_ref = self.db.migration_get(context, migration_id) + self.db.migration_update(context, migration_id, + { 'status': 'migrating', }) + self.driver.transfer_disk(context, instance_id, + migration_ref['dest_host']) + self.db.migration_update(context, migration_id, + { 'status': 'post-migrating', }) + + self.driver.power_off(context, migration_ref['instance_id']) + # This is where we would update the VM record after resizing + + service = self.db.service_get_by_host_and_topic(context, + migration_ref['dest_host'], topic) + topic = self.db.queue_get_for(context, topic, service['id']) + rpc.cast(context, topic, + { 'method': 'finish_resize', + 'migration_id': migration_ref['id'], } + + @exception.wrap_exception + @checks_instance_lock + def finish_resize(self, context, migration_id): + """Completes the migration process by setting up the newly transferred + disk and turning on the instance on its new host machine""" + migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get(context, + migration_ref['instance_id']) + + # this may get passed into the following spawn instead + self.driver.attach_disk(context, migration_ref['instance_id']) + self.driver.spawn(context, instance_ref, preexisting=True) + + self.db.migration_update(context, migration_id, + {'status': 'finished', }) + + # Cleans up any transferred files and unmounts things + self.driver.cleanup_disk_transfer(context, instance_ref['id']) @exception.wrap_exception @checks_instance_lock diff --git a/nova/db/api.py b/nova/db/api.py index 789cb8ebb..5da0e9840 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -80,10 +80,15 @@ def service_destroy(context, instance_id): def service_get(context, service_id): - """Get an service or raise if it does not exist.""" + """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) +def service_get_by_host_and_topic(context, host, topic): + """Get a service by the host it's on and the topic it listens to""" + return IMPL.service_get_by_host_and_topic(context, host, topic) + + def service_get_all(context, disabled=False): """Get all service.""" return IMPL.service_get_all(context, None, disabled) @@ -255,6 +260,21 @@ def floating_ip_get_by_address(context, address): #################### +def migration_create(context, values): + """Create a migration record""" + return IMPL.migration_create(context, values) + +def migration_get(context, migration_id): + """Finds a migration by the id""" + return IMPL.migration_get(context, migration_id) + +def migration_get_by_instance_id(context,
instance_id): + """Finds a migration by the instance id its migrating""" + return IMPL.migration_get_by_instance_id(context, instance_id) + +#################### + + def fixed_ip_associate(context, address, instance_id): """Associate fixed ip to instance. diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 85250d56e..e94f9f4d2 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -156,6 +156,15 @@ def service_get_all_by_topic(context, topic): filter_by(topic=topic).\ all() +@require_admin_context +def service_get_by_host_and_topic(context, host, topic): + session = get_session() + return session.query(models.Service).\ + filter_by(deleted=False).\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + all() @require_admin_context def service_get_all_by_host(context, host): @@ -1909,6 +1918,50 @@ def host_get_networks(context, host): all() +################### + + +@require_admin_context +def migration_create(context, values): + migration = models.Migration() + migration.update(values) + migration.save() + return migration + + +@require_admin_context +def migration_update(context, migration_id, values): + session = get_session() + with session.begin(): + migration = migration_get(context, migration_id, session=session) + migration.update(values) + return migration + + +@require_admin_context +def migration_get(context, migration_id): + session = get_session() + result = session.query(models.Migration.\ + filter_by(migration_id=migration_id)). + first() + if not result: + raise exception.NotFound(_("No migration found with id %s") + % migration_id) + return result + + +@require_admin_context +def migration_get_by_instance(context, instance_id): + session = get_session() + result = session.query(models.Migration.\ + filter_by(instance_id=instance_id)). 
+ first() + if not result: + raise exception.NotFound(_("No migration found with instance id %s") + % migration_id) + return result + + ################## diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 7efb36c0e..499275504 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -366,6 +366,15 @@ class KeyPair(BASE, NovaBase): public_key = Column(Text) +class Migration(BASE, NovaBase): + """Represents a running host-to-host migration.""" + __tablename__ = 'migrations' + source_host = Column(String(255)) + dest_host = Column(String(255)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + status = Column(String(255)) #TODO(_cerberus_): enum + + class Network(BASE, NovaBase): """Represents a network.""" __tablename__ = 'networks' @@ -547,7 +556,8 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console) # , Image, Host + Project, Certificate, ConsolePool, Console, + Migration) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3f9eb39d1..468881355 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -60,6 +60,15 @@ class VMOps(object): vms.append(rec["name_label"]) return vms + def power_on(self, instance): + """Power on a VM instance""" + vm = VMHelper.lookup(self._session, instance.name) + if vm is None: + raise exception(_('Attempted to power on non-existent instance' + ' bad instance id %s') % instance.id) + LOG.debug(_("Starting instance %s"), instance.name) + self._session.call_xenapi('VM.start', vm, False, False) + def spawn(self, instance): """Create VM instance""" vm = VMHelper.lookup(self._session, instance.name) @@ -259,7 +268,8 @@ class VMOps(object): raise RuntimeError(resp_dict['message']) return resp_dict['message'] - def _shutdown(self, instance, vm): + + def _shutdown(self, instance, vm, method='hard'): """Shutdown an instance """ state = self.get_info(instance['name'])['state'] if state == power_state.SHUTDOWN: @@ -268,7 +278,11 @@ class VMOps(object): return try: - task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) + task = None + if method == 'clean': + task = self._session.call_xenapi('Async.VM.clean_shutdown', vm) + else: + task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: LOG.exception(exc) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 4637b4c29..628291764 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -184,6 +184,17 @@ class XenAPIConnection(object): """Unpause paused VM instance""" self._vmops.unpause(instance, callback) + def power_off(self, instance): + """Shuts down a running VM instance""" + self._vmops._shutdown(instance, method='clean') + + def power_on(self, instance): + """powers on a powered off VM instance""" + self._vmops.power_on(instance) + + def transfer_disk(self, instance, dest, callback): + self._vmops.transfer_disk( + def suspend(self, instance, callback): """suspend the specified instance""" self._vmops.suspend(instance, callback) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index aadacce57..3b7ceacc9 100644 --- 
a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -54,6 +54,10 @@ def copy_kernel_vdi(session,args): _copy_kernel_vdi('/dev/%s' % dev,copy_args)) return filename +def transfer_disk(dest, args): + vdi = exists(args, 'vdi-ref') + + def _copy_kernel_vdi(dest,copy_args): vdi_uuid=copy_args['vdi_uuid'] vdi_size=copy_args['vdi_size'] -- cgit From 855b9443cf109302e9882d527f237049b9624a05 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 4 Feb 2011 15:43:41 -0600 Subject: Didn't mean to actually make changes to the glance plugin --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 4 ---- 1 file changed, 4 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 3b7ceacc9..aadacce57 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -54,10 +54,6 @@ def copy_kernel_vdi(session,args): _copy_kernel_vdi('/dev/%s' % dev,copy_args)) return filename -def transfer_disk(dest, args): - vdi = exists(args, 'vdi-ref') - - def _copy_kernel_vdi(dest,copy_args): vdi_uuid=copy_args['vdi_uuid'] vdi_size=copy_args['vdi_size'] -- cgit From 2458d674807d951a6b58c28cd334cd8d097822a9 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 7 Feb 2011 10:20:49 -0600 Subject: A few changes --- nova/compute/manager.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 140db0d3e..485efc047 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -380,6 +380,20 @@ class ComputeManager(manager.Manager): """Update instance state when async task completes.""" self._update_state(context, instance_id) + @exception.wrap_exception + @checks_instance_lock + def confirm_resize(self, context, instance_id): + """Destroys the old instance on the source machine""" + pass + + @exception.wrap_exception + @echecks_instance_lock + def revert_resize(self, context, instance_id): + """Destroys the new instance on the destination machine, + reverts the model changes, and powers on the old + instance on the source machine""" + pass + @exception.wrap_exception @checks_instance_lock @@ -395,8 +409,9 @@ class ComputeManager(manager.Manager): 'status': 'pre-migrating' } LOG.audit(_('instance %s: migrating to '), instance_id, context=context) service = self.db.service_get_by_host_and_topic(context, - instance_ref['host'], topic) - topic = self.db.queue_get_for(context, topic, service['id']) + instance_ref['host'], FLAGS.compute_topic) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + service['id']) rpc.cast(context, topic, { 'method': 'resize_instance', 'migration_id': migration_ref['id'], } @@ -417,8 +432,9 @@ class ComputeManager(manager.Manager): # This is where we would update the VM record after resizing service = self.db.service_get_by_host_and_topic(context, - migration_ref['dest_host'], topic) - topic = self.db.queue_get_for(context, topic, service['id']) + migration_ref['dest_host'], FLAGS.compute_topic) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + service['id']) rpc.cast(context, topic, { 'method': 'finish_resize', 'migration_id': migration_ref['id'], } -- cgit From e59c62efe5492e59fcc26b7b74f6ac2daa0caabe Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 7 Feb 2011 16:57:02 -0600 Subject: Added data_transfer xapi plugin --- nova/virt/xenapi_conn.py | 2 +- 
.../xenapi/etc/xapi.d/plugins/data_transfer | 44 ++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 628291764..acfde6caf 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -193,7 +193,7 @@ class XenAPIConnection(object): self._vmops.power_on(instance) def transfer_disk(self, instance, dest, callback): - self._vmops.transfer_disk( + self._vmops.transfer_disk() def suspend(self, instance, callback): """suspend the specified instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer new file mode 100644 index 000000000..d310a65d9 --- /dev/null +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +XenAPI Plugin for transfering data between host nodes +""" + +import os.path +import subprocess + +import XenAPIPlugin + +SSH_HOSTS = '/root/.ssh/known_hosts' +DEVNULL = '/dev/null' +KEYSCAN = '/usr/bin/ssh-keyscan' + +def _key_scan_and_add(host): + """SSH scans a remote host and writes the SSH key out to known_hosts""" + open(SSH_HOSTS, 'a').close() + null = open(DEVNULL, 'w') + known_hosts = open(SSH_HOSTS, 'a') + key = subprocess.Popen(['/usr/bin/ssh-keyscan', '-t', 'rsa', host], + stdout=subprocess.PIPE, stderr=null).communicate()[0].strip() + grep = subprocess.call(['/bin/grep', '-o', '%s' % key, SSH_HOSTS], + stdout=null, stderr=null) + if grep == 1: + known_hosts.write(key) + null.close() + known_hosts.close() -- cgit From a40f6041556ec09a1cb79c2b8abcec7fa70e72bf Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 7 Feb 2011 17:12:15 -0600 Subject: Some stuff --- nova/virt/xenapi_conn.py | 2 +- plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index acfde6caf..726106b37 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -193,7 +193,7 @@ class XenAPIConnection(object): self._vmops.power_on(instance) def transfer_disk(self, instance, dest, callback): - self._vmops.transfer_disk() + self._vmops.transfer_disk(dest) def suspend(self, instance, callback): """suspend the specified instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer index d310a65d9..cde7bb823 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer @@ -28,10 +28,13 @@ import XenAPIPlugin SSH_HOSTS = '/root/.ssh/known_hosts' DEVNULL = '/dev/null' KEYSCAN = '/usr/bin/ssh-keyscan' +RSYNC 
= '/usr/bin/rsync' def _key_scan_and_add(host): """SSH scans a remote host and writes the SSH key out to known_hosts""" + # Touch the file if it doesn't yet exist open(SSH_HOSTS, 'a').close() + null = open(DEVNULL, 'w') known_hosts = open(SSH_HOSTS, 'a') key = subprocess.Popen(['/usr/bin/ssh-keyscan', '-t', 'rsa', host], @@ -42,3 +45,12 @@ def _key_scan_and_add(host): known_hosts.write(key) null.close() known_hosts.close() + +def transfer_vhd(host, vhd_path): + """Rsyncs a VHD to an adjacent host""" + _key_scan_and_add(host) + if subprocess.call([RSYNC, vhd_path, "%s:/root/" % host]) != 0: + raise Exception("Unexpected VHD transfer failure") + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd}) -- cgit From 203c94c89caabc1d4ece4c462819a90c05cde163 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 7 Feb 2011 17:39:53 -0600 Subject: blargh --- nova/compute/api.py | 2 +- nova/virt/xenapi/vmops.py | 8 ++++++++ nova/virt/xenapi_conn.py | 2 +- plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer | 6 +++--- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index f0d5ff2cb..6b2628378 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -379,7 +379,7 @@ class API(base.Base): kwargs = {'method': method, 'args': params} return rpc.call(context, queue, kwargs) - def _cast_scheduler_message(self, context, args) + def _cast_scheduler_message(self, context, args): """Generic handler for RPC calls to the scheduler""" rpc.cast(context, FLAGS.scheduler_topic, args) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 468881355..4b835c707 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -220,6 +220,14 @@ class VMOps(object): logging.debug(_("Finished snapshot and upload for VM %s"), instance) + def transfer_disk(self, instance, dest): + """ Copies a VHD from one host machine to another + + :param instance: the instance that owns the VHD in question + :param dest: the destination host machine + """ + + def resize(self, instance, flavor): """Resize a running instance by changing it's RAM and disk size """ raise NotImplementedError() diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 726106b37..2e587117a 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -193,7 +193,7 @@ class XenAPIConnection(object): self._vmops.power_on(instance) def transfer_disk(self, instance, dest, callback): - self._vmops.transfer_disk(dest) + self._vmops.transfer_disk(instance, dest) def suspend(self, instance, callback): """suspend the specified instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer index cde7bb823..2af4a758b 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer @@ -46,11 +46,11 @@ def _key_scan_and_add(host): null.close() known_hosts.close() -def transfer_vhd(host, vhd_path): +def transfer_file(host, file_path): """Rsyncs a VHD to an adjacent host""" _key_scan_and_add(host) - if subprocess.call([RSYNC, vhd_path, "%s:/root/" % host]) != 0: + if subprocess.call([RSYNC, file_path, "%s:/root/" % host]) != 0: raise Exception("Unexpected VHD transfer failure") if __name__ == '__main__': - XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd}) + XenAPIPlugin.dispatch({'transfer_file': transfer_file}) -- cgit From 3f2cd17011e17991ebf1a77605686ce3dc48d92e Mon Sep 17 00:00:00 
2001 From: Cerberus Date: Tue, 8 Feb 2011 10:47:23 -0600 Subject: Changes and bug fixes --- nova/compute/manager.py | 8 ++-- nova/db/sqlalchemy/api.py | 6 +-- .../sqlalchemy/migrate_repo/versions/003_cactus.py | 46 ++++++++++++++++++++++ nova/db/sqlalchemy/migration.py | 2 +- nova/virt/xenapi/vmops.py | 2 +- 5 files changed, 54 insertions(+), 10 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 485efc047..4189c49a4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -387,7 +387,7 @@ class ComputeManager(manager.Manager): pass @exception.wrap_exception - @echecks_instance_lock + @checks_instance_lock def revert_resize(self, context, instance_id): """Destroys the new instance on the destination machine, reverts the model changes, and powers on the old @@ -406,7 +406,7 @@ class ComputeManager(manager.Manager): { 'instance_id': instance_id, 'source_host': instance_ref['host'], 'dest_host': socket.gethostbyname(socket.gethostname()), - 'status': 'pre-migrating' } + 'status': 'pre-migrating' }) LOG.audit(_('instance %s: migrating to '), instance_id, context=context) service = self.db.service_get_by_host_and_topic(context, instance_ref['host'], FLAGS.compute_topic) @@ -414,7 +414,7 @@ class ComputeManager(manager.Manager): service['id']) rpc.cast(context, topic, { 'method': 'resize_instance', - 'migration_id': migration_ref['id'], } + 'migration_id': migration_ref['id'], }) @exception.wrap_exception @checks_instance_lock @@ -437,7 +437,7 @@ class ComputeManager(manager.Manager): service['id']) rpc.cast(context, topic, { 'method': 'finish_resize', - 'migration_id': migration_ref['id'], } + 'migration_id': migration_ref['id'], }) @exception.wrap_exception @checks_instance_lock diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e94f9f4d2..ece1cd373 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1942,8 +1942,7 @@ def migration_update(context, migration_id, values): def migration_get(context, migration_id): session = get_session() result = session.query(models.Migration.\ - filter_by(migration_id=migration_id)). - first() + filter_by(migration_id=migration_id)).first() if not result: raise exception.NotFound(_("No migration found with id %s") % migration_id) @@ -1954,8 +1953,7 @@ def migration_get(context, migration_id): def migration_get_by_instance(context, instance_id): session = get_session() result = session.query(models.Migration.\ - filter_by(instance_id=instance_id)). - first() + filter_by(instance_id=instance_id)).first() if not result: raise exception.NotFound(_("No migration found with instance id %s") % migration_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py new file mode 100644 index 000000000..bbe5cbcb0 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License.from sqlalchemy import * + +from migrate import * + +from nova import log as logging + + +meta = MetaData() + +# +# New Tables +# + +migrations = Table('migrations', meta, + Column('source_host', String(255)) + Column('dest_host', String(255)) + Column('instance_id', Integer, ForeignKey('instances.id'), nullable=True) + Column('status', String(255)) + ) + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (migrations): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index 2a13c5466..644e3e45e 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -50,7 +50,7 @@ def db_version(): 'key_pairs', 'networks', 'projects', 'quotas', 'security_group_instance_association', 'security_group_rules', 'security_groups', - 'services', + 'services', 'migrations', 'users', 'user_project_association', 'user_project_role_association', 'user_role_association', diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 4b835c707..6a7621502 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -226,7 +226,7 @@ class VMOps(object): :param instance: the instance that owns the VHD in question :param dest: the destination host machine """ - + vm_ref = VMHelper.lookup(self._session, instance.name) def resize(self, instance, flavor): """Resize a running instance by changing it's RAM and disk size """ -- cgit From 49e07d0581317daf1bb605d56575c62743a210be Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 8 Feb 2011 11:07:03 -0600 Subject: Commas help --- nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py index bbe5cbcb0..dc384fbc3 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -27,10 +27,11 @@ meta = MetaData() # migrations = Table('migrations', meta, - Column('source_host', String(255)) - Column('dest_host', String(255)) - Column('instance_id', Integer, ForeignKey('instances.id'), nullable=True) - Column('status', String(255)) + Column('source_host', String(255)), + Column('dest_host', String(255)), + Column('instance_id', Integer, ForeignKey('instances.id'), + nullable=True), + Column('status', String(255)) ) def upgrade(migrate_engine): -- cgit From 089286802db0dca22cd67e46f26fab3ab0a3a73b Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 8 Feb 2011 13:12:21 -0600 Subject: Typos and primary keys --- nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py index dc384fbc3..4d01cd874 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -27,6 +27,7 @@ meta = MetaData() # migrations = Table('migrations', meta, + Column('id', Integer(), primary_key=True, nullable=False), Column('source_host', String(255)), Column('dest_host', String(255)), 
Column('instance_id', Integer, ForeignKey('instances.id'), @@ -38,7 +39,7 @@ def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata meta.bind = migrate_engine - for table in (migrations): + for table in (migrations, ): try: table.create() except Exception: -- cgit From ce5e3bdd30712aa6704926e6cdeb5ae73ae8200b Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 9 Feb 2011 15:26:37 -0600 Subject: A lot of stuff --- nova/compute/manager.py | 8 ++-- nova/db/sqlalchemy/models.py | 1 + nova/virt/xenapi/vmops.py | 53 +++++++++++++++++----- nova/virt/xenapi_conn.py | 8 +++- .../xenapi/etc/xapi.d/plugins/data_transfer | 37 +++++++-------- 5 files changed, 71 insertions(+), 36 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 4189c49a4..ac09f7c8c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -423,12 +423,12 @@ class ComputeManager(manager.Manager): migration_ref = self.db.migration_get(context, migration_id) self.db.migration_update(context, migration_id, { 'status': 'migrating', }) - self.driver.transfer_disk(context, instance_id, + + self.driver.migrate_disk_and_power_off(context, instance, migration_ref['dest_host']) + self.db.migration_update(context, migration_id, { 'status': 'post-migrating', }) - - self.driver.power_off(context, migration_ref['instance_id']) # This is where we would update the VM record after resizing service = self.db.service_get_by_host_and_topic(context, @@ -449,7 +449,7 @@ class ComputeManager(manager.Manager): migration_ref['instance_id']) # this may get passed into the following spawn instead - self.driver.attach_disk(context, migration_ref['instance_id']) + self.driver.attach_disk(context, instance_ref) self.driver.spawn(context, instance_ref, preexisting=True) self.db.migration_update(context, migration_id, diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 499275504..ebf3a382b 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -369,6 +369,7 @@ class KeyPair(BASE, NovaBase): class Migration(BASE, NovaBase): """Represents a running host-to-host migration.""" __tablename__ = 'migrations' + id = Column(Integer, primary_key=True, nullable=False) source_host = Column(String(255)) dest_host = Column(String(255)) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6a7621502..40b075b3d 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -196,6 +196,26 @@ class VMOps(object): Glance. 
""" + with self._get_snapshot(instance) as snapshot: + # call plugin to ship snapshot off to glance + VMHelper.upload_image( + self._session, instance.id, snapshot.vdi_uuids, image_id) + + logging.debug(_("Finished snapshot and upload for VM %s"), instance) + + def _get_snapshot(self, instance): + class Snapshot(object): + def __init__(self, virt, instance, vdis): + self.instance = instance + self.vdi_uuids = vdis + self.virt = virt + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.virt._destroy(self.instance, self.vm_ref, shutdown=False) + #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added @@ -204,30 +224,41 @@ class VMOps(object): label = "%s-snapshot" % instance.name try: - template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( + _, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) + return Snapshot(self, instance, template_vdi_uuids) except self.XenAPI.Failure, exc: logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s") % locals()) return - try: - # call plugin to ship snapshot off to glance - VMHelper.upload_image( - self._session, instance.id, template_vdi_uuids, image_id) - finally: - self._destroy(instance, template_vm_ref, shutdown=False) - - logging.debug(_("Finished snapshot and upload for VM %s"), instance) - - def transfer_disk(self, instance, dest): + def migrate_disk_and_power_off(self, instance, dest): """ Copies a VHD from one host machine to another :param instance: the instance that owns the VHD in question :param dest: the destination host machine + :param disk_type: values are 'primary' or 'cow' """ vm_ref = VMHelper.lookup(self._session, instance.name) + # The primary VDI becomes the COW after the snapshot. We can figure + # this out from the VBD. 
The base copy is the parent_uuid returned + # from the snapshot creation + with self._get_snapshot(instance) as snapshot: + params = {'host':dest, 'vdi_uuid':snapshot.vdi_uuids[1]} + kwargs = {'params': pickle.dumps(params)} + self._session.async_call_plugin('data_transfer', 'transfer_vhd', + kwargs) + + # Now power down the instance and transfer the COW VHD + self._shutdown(instance, method='clean') + + _, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid']} + kwargs = {'params': pickle.dumps(params)} + self._session.async_call_plugin('data_transfer', 'transfer_vhd', + kwargs) + def resize(self, instance, flavor): """Resize a running instance by changing it's RAM and disk size """ raise NotImplementedError() diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 2e587117a..98b5e7851 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -192,8 +192,14 @@ class XenAPIConnection(object): """powers on a powered off VM instance""" self._vmops.power_on(instance) - def transfer_disk(self, instance, dest, callback): + def migrate_disk_and_power_off(self, instance, dest): + """Transfers the VHD of a running instance to another host, then shuts + off the instance copies over the COW disk""" self._vmops.transfer_disk(instance, dest) + + def move_disk(self, instance_ref): + """Moves the copied VDIs into the SR""" + pass def suspend(self, instance, callback): """suspend the specified instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer index 2af4a758b..bd46e1c0b 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer @@ -21,6 +21,7 @@ XenAPI Plugin for transfering data between host nodes """ import os.path +import pickle import subprocess import XenAPIPlugin @@ -30,27 +31,23 @@ DEVNULL = '/dev/null' KEYSCAN = '/usr/bin/ssh-keyscan' RSYNC = '/usr/bin/rsync' -def _key_scan_and_add(host): - """SSH scans a remote host and writes the SSH key out to known_hosts""" - # Touch the file if it doesn't yet exist - open(SSH_HOSTS, 'a').close() - - null = open(DEVNULL, 'w') - known_hosts = open(SSH_HOSTS, 'a') - key = subprocess.Popen(['/usr/bin/ssh-keyscan', '-t', 'rsa', host], - stdout=subprocess.PIPE, stderr=null).communicate()[0].strip() - grep = subprocess.call(['/bin/grep', '-o', '%s' % key, SSH_HOSTS], - stdout=null, stderr=null) - if grep == 1: - known_hosts.write(key) - null.close() - known_hosts.close() - -def transfer_file(host, file_path): + +def transfer_vhd(session, args): """Rsyncs a VHD to an adjacent host""" - _key_scan_and_add(host) - if subprocess.call([RSYNC, file_path, "%s:/root/" % host]) != 0: + params = pickle.dumps(args) + instance_id = params['instance_id'] + host = params['host'] + vdi_uuid = params['vdi_uuid'] + sr_path = get_sr_path(session) + vhd_path = "%s.vhd" % vdi_uuid + + source_path = "%s/%s" % (sr_path, vhd_path) + dest_path = '%s:/images/instance%d/' % (host, instance_id) + rsync_args = [['nohup', RSYNC, '-av', '--progress', + '-e "ssh -o StrictHostKeyChecking=no"', source_path, dest_path] + + if subprocess.call(rsync_args) != 0: raise Exception("Unexpected VHD transfer failure") if __name__ == '__main__': - XenAPIPlugin.dispatch({'transfer_file': transfer_file}) + XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd}) -- cgit From 482c7b57a3d0ac8bf6df98539bf8a1220470e0f7 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 9 
Feb 2011 15:27:23 -0600 Subject: Renamed migration plugin --- .../xenapi/etc/xapi.d/plugins/data_transfer | 53 ---------------------- .../xenserver/xenapi/etc/xapi.d/plugins/migration | 53 ++++++++++++++++++++++ 2 files changed, 53 insertions(+), 53 deletions(-) delete mode 100644 plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer create mode 100644 plugins/xenserver/xenapi/etc/xapi.d/plugins/migration diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer b/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer deleted file mode 100644 index bd46e1c0b..000000000 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/data_transfer +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -XenAPI Plugin for transfering data between host nodes -""" - -import os.path -import pickle -import subprocess - -import XenAPIPlugin - -SSH_HOSTS = '/root/.ssh/known_hosts' -DEVNULL = '/dev/null' -KEYSCAN = '/usr/bin/ssh-keyscan' -RSYNC = '/usr/bin/rsync' - - -def transfer_vhd(session, args): - """Rsyncs a VHD to an adjacent host""" - params = pickle.dumps(args) - instance_id = params['instance_id'] - host = params['host'] - vdi_uuid = params['vdi_uuid'] - sr_path = get_sr_path(session) - vhd_path = "%s.vhd" % vdi_uuid - - source_path = "%s/%s" % (sr_path, vhd_path) - dest_path = '%s:/images/instance%d/' % (host, instance_id) - rsync_args = [['nohup', RSYNC, '-av', '--progress', - '-e "ssh -o StrictHostKeyChecking=no"', source_path, dest_path] - - if subprocess.call(rsync_args) != 0: - raise Exception("Unexpected VHD transfer failure") - -if __name__ == '__main__': - XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration new file mode 100644 index 000000000..bd46e1c0b --- /dev/null +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +XenAPI Plugin for transfering data between host nodes +""" + +import os.path +import pickle +import subprocess + +import XenAPIPlugin + +SSH_HOSTS = '/root/.ssh/known_hosts' +DEVNULL = '/dev/null' +KEYSCAN = '/usr/bin/ssh-keyscan' +RSYNC = '/usr/bin/rsync' + + +def transfer_vhd(session, args): + """Rsyncs a VHD to an adjacent host""" + params = pickle.dumps(args) + instance_id = params['instance_id'] + host = params['host'] + vdi_uuid = params['vdi_uuid'] + sr_path = get_sr_path(session) + vhd_path = "%s.vhd" % vdi_uuid + + source_path = "%s/%s" % (sr_path, vhd_path) + dest_path = '%s:/images/instance%d/' % (host, instance_id) + rsync_args = [['nohup', RSYNC, '-av', '--progress', + '-e "ssh -o StrictHostKeyChecking=no"', source_path, dest_path] + + if subprocess.call(rsync_args) != 0: + raise Exception("Unexpected VHD transfer failure") + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd}) -- cgit From ac33f61c5c382fc7c8e8ab872192858860672d70 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 9 Feb 2011 16:38:27 -0600 Subject: Plugin tidying and more migration implementation --- nova/virt/xenapi/vm_utils.py | 16 +++--- nova/virt/xenapi/vmops.py | 35 +++++++---- nova/virt/xenapi_conn.py | 9 ++- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 4 +- .../xenserver/xenapi/etc/xapi.d/plugins/migration | 67 +++++++++++++++++++++- 5 files changed, 108 insertions(+), 23 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4bbd522c1..e16662aad 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -478,6 +478,14 @@ class VMHelper(HelperBase): except cls.XenAPI.Failure as e: return {"Unable to retrieve diagnostics": e} + @classmethod + def scan_sr(cls, session, instance_id, sr_ref): + LOG.debug(_("Re-scanning SR %s"), sr_ref) + task = session.call_xenapi('Async.SR.scan', sr_ref) + session.wait_for_task(instance_id, task) + + + def get_rrd(host, uuid): """Return the VM RRD XML as a string""" @@ -520,12 +528,6 @@ def get_vhd_parent_uuid(session, vdi_ref): return None -def scan_sr(session, instance_id, sr_ref): - LOG.debug(_("Re-scanning SR %s"), sr_ref) - task = session.call_xenapi('Async.SR.scan', sr_ref) - session.wait_for_task(instance_id, task) - - def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): """ Spin until the parent VHD is coalesced into its parent VHD @@ -550,7 +552,7 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, " %(max_attempts)d), giving up...") % locals()) raise exception.Error(msg) - scan_sr(session, instance_id, sr_ref) + VMHelper.scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent" diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 40b075b3d..ea4b7899b 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -146,7 +146,7 @@ class VMOps(object): """ vm = None try: - if instance_or_vm.startswith("OpaqueRef:"): + if instance_or_vm.startswith("OpaqueRef:") # Got passed an opaque ref; return it return instance_or_vm else: @@ -241,23 +241,38 @@ class VMOps(object): """ vm_ref = VMHelper.lookup(self._session, instance.name) - # The primary VDI becomes the COW after the snapshot. We can figure - # this out from the VBD. 
The base copy is the parent_uuid returned + # The primary VDI becomes the COW after the snapshot, and we can + # identify it via the VBD. The base copy is the parent_uuid returned # from the snapshot creation + + #TODO(mdietz): explicitly forcing the base_copy and cow names is + #pretty fugly with self._get_snapshot(instance) as snapshot: - params = {'host':dest, 'vdi_uuid':snapshot.vdi_uuids[1]} - kwargs = {'params': pickle.dumps(params)} - self._session.async_call_plugin('data_transfer', 'transfer_vhd', - kwargs) + params = {'host':dest, 'vdi_uuid':snapshot.vdi_uuids[1], + 'dest_name':'base_copy.vhd'} + self._session.async_call_plugin('migration', 'transfer_vhd', + {'params': pickle.dumps(params)}) # Now power down the instance and transfer the COW VHD self._shutdown(instance, method='clean') _, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) - params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid']} - kwargs = {'params': pickle.dumps(params)} + params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid'], + 'dest_name': 'cow.vhd'} self._session.async_call_plugin('data_transfer', 'transfer_vhd', - kwargs) + {'params': pickle.dumps(params)}) + return snapshot.vdi_uuids[1], vm_vdi_rec['uuid'] + + def attach_disk(self, instance): + vm_ref = VMHelper.lookup(self._session, instance.name) + + params = { 'instance_id': instance.id } + self._session.async_call_plugin('migration', 'move_vhds_into_sr', + {'params': pickle.dumps(params)}) + + + _, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + VMHelper.scan_sr(self._session, instance.id, vm_vdi_rec['SR']) def resize(self, instance, flavor): """Resize a running instance by changing it's RAM and disk size """ diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 98b5e7851..cc43050b4 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -164,6 +164,9 @@ class XenAPIConnection(object): """Resize a VM instance""" raise NotImplementedError() + def attach_disk(self, instance_ref): + + def reboot(self, instance): """Reboot VM instance""" self._vmops.reboot(instance) @@ -195,11 +198,11 @@ class XenAPIConnection(object): def migrate_disk_and_power_off(self, instance, dest): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk""" - self._vmops.transfer_disk(instance, dest) + self._vmops.migrate_disk_and_power_off(instance, dest) - def move_disk(self, instance_ref): + def attach_disk(self, instance): """Moves the copied VDIs into the SR""" - pass + self._vmops.attach_disk(instance) def suspend(self, instance, callback): """suspend the specified instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index aadacce57..817269769 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -138,8 +138,8 @@ def get_sr_path(session): return sr_path -#TODO(sirp): both objectstore and glance need this, should this be refactored -#into common lib +#TODO(sirp): objectstore, migration and glance need this, should this be +# refactored into common lib def find_sr(session): host = get_this_host(session) srs = session.xenapi.SR.get_all() diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index bd46e1c0b..e81b18a5e 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -20,16 +20,79 @@ XenAPI Plugin 
for transfering data between host nodes """ +import os import os.path import pickle +import shutil import subprocess +import uuid import XenAPIPlugin +from pluginlib_nova import * + SSH_HOSTS = '/root/.ssh/known_hosts' DEVNULL = '/dev/null' KEYSCAN = '/usr/bin/ssh-keyscan' RSYNC = '/usr/bin/rsync' +FILE_SR_PATH = '/var/run/sr-mount' +IMAGE_PATH = '/images/' +VHD_UTIL = '/usr/sbin/vhd-util' + +def get_sr_path(session): + sr_ref = find_sr(session) + + if sr_ref is None: + raise Exception('Cannot find SR to read VDI from') + + sr_rec = session.xenapi.SR.get_record(sr_ref) + sr_uuid = sr_rec["uuid"] + sr_path = os.path.join(FILE_SR_PATH, sr_uuid) + return sr_path + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + +def move_vhds_into_sr(session, args): + """Moves the VHDs from their copied location to the SR""" + params = pickle.dumps(args) + instance_id = params['instance_id'] + + sr_path = get_sr_path(session) + + # Discover the copied VHDs locally, and then set up paths to copy + # them to under the SR + source_image_path = "%s/instance%d" % (IMAGE_PATH, instance_id) + source_base_copy_path = "%s/base_copy.vhd" % source_image_path + source_cow_path = "%s/cow.vhd" % source_image_path + + temp_vhd_path = "%s/instance%d/" % (sr_path, instance_id) + new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, str(uuid.uuid4())) + new_cow_path = "%s/%s.vhd" % (temp_vhd_path, str(uuid.uuid4())) + + os.mkdir(temp_vhd_path) + shutil.move(source_base_copy_path, new_base_copy_path) + shutil.move(source_cow_path, new_cow_path) + + os.rmdir(source_image_path) + + # Link the COW to the base copy + subprocess.call([VHD_UTIL, 'modify', '-n', new_cow_path, '-p', + new_base_copy_path]) + + shutil.move("%s/*.vhd" % temp_vhd_path, sr_path) + os.rmdir(temp_vhd_path) def transfer_vhd(session, args): @@ -38,11 +101,13 @@ def transfer_vhd(session, args): instance_id = params['instance_id'] host = params['host'] vdi_uuid = params['vdi_uuid'] + dest_name = params['dest_name'] sr_path = get_sr_path(session) vhd_path = "%s.vhd" % vdi_uuid source_path = "%s/%s" % (sr_path, vhd_path) - dest_path = '%s:/images/instance%d/' % (host, instance_id) + dest_path = '%s:%sinstance%d/%s' % (host, IMAGE_PATH, instance_id, + dest_name) rsync_args = [['nohup', RSYNC, '-av', '--progress', '-e "ssh -o StrictHostKeyChecking=no"', source_path, dest_path] -- cgit From d8a7a76cd4fd22a6ad9fc1a7b879a8dbffcede5f Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 10 Feb 2011 13:42:57 -0600 Subject: Some more cleanup --- nova/compute/manager.py | 7 ++++--- nova/virt/xenapi/vmops.py | 3 ++- nova/virt/xenapi_conn.py | 5 +---- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 3 ++- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ac09f7c8c..54c3412f4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -429,7 +429,8 @@ class ComputeManager(manager.Manager): self.db.migration_update(context, migration_id, { 'status': 'post-migrating', }) - # This is where we would update the VM record after resizing + #TODO(mdietz): This is where we would update the VM record + #after resizing service = 
self.db.service_get_by_host_and_topic(context, migration_ref['dest_host'], FLAGS.compute_topic) @@ -449,8 +450,8 @@ class ComputeManager(manager.Manager): migration_ref['instance_id']) # this may get passed into the following spawn instead - self.driver.attach_disk(context, instance_ref) - self.driver.spawn(context, instance_ref, preexisting=True) + disk_info = self.driver.attach_disk(context, instance_ref) + self.driver.spawn(context, instance_ref, disk_info=disk_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index ea4b7899b..7d88876e4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -106,7 +106,8 @@ class VMOps(object): instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, pv_kernel) - VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=vdi_ref, + userdevice=0, bootable=True) if network_ref: VMHelper.create_vif(self._session, vm_ref, diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index cc43050b4..8c756a7e3 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -152,7 +152,7 @@ class XenAPIConnection(object): """List VM instances""" return self._vmops.list_instances() - def spawn(self, instance): + def spawn(self, instance, disk_info=None): """Create VM instance""" self._vmops.spawn(instance) @@ -164,9 +164,6 @@ class XenAPIConnection(object): """Resize a VM instance""" raise NotImplementedError() - def attach_disk(self, instance_ref): - - def reboot(self, instance): """Reboot VM instance""" self._vmops.reboot(instance) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index e81b18a5e..0fb7b5806 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -70,6 +70,7 @@ def move_vhds_into_sr(session, args): instance_id = params['instance_id'] sr_path = get_sr_path(session) + sr_temp_path = "%s/images/" % sr_path # Discover the copied VHDs locally, and then set up paths to copy # them to under the SR @@ -77,7 +78,7 @@ def move_vhds_into_sr(session, args): source_base_copy_path = "%s/base_copy.vhd" % source_image_path source_cow_path = "%s/cow.vhd" % source_image_path - temp_vhd_path = "%s/instance%d/" % (sr_path, instance_id) + temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id) new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, str(uuid.uuid4())) new_cow_path = "%s/%s.vhd" % (temp_vhd_path, str(uuid.uuid4())) -- cgit From a6ce3b777221690df17137e70d6b7bf35ad10b02 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 10 Feb 2011 13:59:54 -0600 Subject: Spawn from disk --- nova/compute/manager.py | 2 +- nova/virt/xenapi/vmops.py | 47 ++++++++++++++++++++++++++--------------------- nova/virt/xenapi_conn.py | 4 ++-- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 54c3412f4..5f7d070af 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -451,7 +451,7 @@ class ComputeManager(manager.Manager): # this may get passed into the following spawn instead disk_info = self.driver.attach_disk(context, instance_ref) - self.driver.spawn(context, instance_ref, disk_info=disk_info) + self.driver.spawn(context, instance_ref, disk=disk_info) 
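# Illustrative sketch only (stand-in names, not the patch's real method
# signatures): the hand-off this resize series is building up. The source
# compute node copies the VHDs and powers the instance off; the destination
# node re-links them into its SR and boots the instance from the result.
def resize_instance_sketch(driver, cast_to_dest, instance, dest_host):
    # Runs on the source host: returns identifiers for the transferred disks.
    disk_info = driver.migrate_disk_and_power_off(instance, dest_host)
    cast_to_dest({'method': 'finish_resize',
                  'args': {'instance_id': instance['id'],
                           'disk_info': disk_info}})

def finish_resize_sketch(driver, instance, disk_info):
    # Runs on the destination host: moves the copied VHDs into the local SR,
    # then spawns the instance from the re-assembled disk.
    new_disk = driver.attach_disk(instance, disk_info)
    driver.spawn(instance, disk=new_disk)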
self.db.migration_update(context, migration_id, {'status': 'finished', }) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7d88876e4..ad46bb40d 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -69,7 +69,7 @@ class VMOps(object): LOG.debug(_("Starting instance %s"), instance.name) self._session.call_xenapi('VM.start', vm, False, False) - def spawn(self, instance): + def spawn(self, instance, disk): """Create VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is not None: @@ -83,27 +83,32 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - #if kernel is not present we must download a raw disk - if instance.kernel_id: - disk_image_type = ImageType.DISK + + vdi_ref = kernel = ramdisk = pv_kernel = None + + # Are we building from a pre-existing disk? + if not disk: + #if kernel is not present we must download a raw disk + if instance.kernel_id: + disk_image_type = ImageType.DISK + else: + disk_image_type = ImageType.DISK_RAW + vdi_uuid = VMHelper.fetch_image(self._session, instance.id, + instance.image_id, user, project, disk_image_type) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + #Have a look at the VDI and see if it has a PV kernel + if not instance.kernel_id: + pv_kernel = VMHelper.lookup_image(self._session, instance.id, + vdi_ref) + if instance.kernel_id: + kernel = VMHelper.fetch_image(self._session, instance.id, + instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) + if instance.ramdisk_id: + ramdisk = VMHelper.fetch_image(self._session, instance.id, + instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) else: - disk_image_type = ImageType.DISK_RAW - vdi_uuid = VMHelper.fetch_image(self._session, instance.id, - instance.image_id, user, project, disk_image_type) - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - #Have a look at the VDI and see if it has a PV kernel - pv_kernel = False - if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, instance.id, - vdi_ref) - kernel = None - if instance.kernel_id: - kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) - ramdisk = None - if instance.ramdisk_id: - ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk) + vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, pv_kernel) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=vdi_ref, diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 8c756a7e3..2fddb8c7f 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -152,9 +152,9 @@ class XenAPIConnection(object): """List VM instances""" return self._vmops.list_instances() - def spawn(self, instance, disk_info=None): + def spawn(self, instance, disk=None): """Create VM instance""" - self._vmops.spawn(instance) + self._vmops.spawn(instance, disk) def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ -- cgit From a70ac6609713f2b610923a7ae382208f4d46b74a Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 10 Feb 2011 15:01:38 -0600 Subject: Typo fixes and some stupidity about the models --- nova/api/openstack/servers.py | 11 +++++------ nova/compute/manager.py | 2 +- nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py | 4 
++++ nova/virt/xenapi/vmops.py | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 61dd3be36..06a40e92c 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -207,27 +207,26 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotImplemented()) def _action_confirm_resize(self, input_dict, req, id): - return fault.Fault(exc.HTTPNotImplemented()) + return faults.Fault(exc.HTTPNotImplemented()) def _action_revert_resize(self, input_dict, req, id): - return fault.Fault(exc.HTTPNotImplemented()) + return faults.Fault(exc.HTTPNotImplemented()) def _action_rebuild(self, input_dict, req, id): - return fault.Fault(exc.HTTPNotImplemented()) + return faults.Fault(exc.HTTPNotImplemented()) def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ try: - resize_flavor = input_dict['resize']['flavorId'] + flavor_id = input_dict['resize']['flavorId'] self.compute_api.resize(req.environ['nova.context'], id, flavor_id) except: return faults.Fault(exc.HTTPUnprocessableEntity()) - return fault.Fault(exc.HTTPAccepted()) + return faults.Fault(exc.HTTPAccepted()) def _action_reboot(self, input_dict, req, id): - #TODO(sandy): rebuild/resize not supported. try: reboot_type = input_dict['reboot']['type'] except Exception: diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5f7d070af..9e8361563 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -401,7 +401,7 @@ class ComputeManager(manager.Manager): """Initiates the process of moving a running instance to another host, possibly changing the RAM and disk size in the process""" context = context.elevated() - instance_ref = self.db.instance_get(context. 
instance_id) + instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_create(context, { 'instance_id': instance_id, 'source_host': instance_ref['host'], diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py index 4d01cd874..499465fce 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -27,6 +27,10 @@ meta = MetaData() # migrations = Table('migrations', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True, nullable=False), Column('source_host', String(255)), Column('dest_host', String(255)), diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index ad46bb40d..3b14390b4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -152,7 +152,7 @@ class VMOps(object): """ vm = None try: - if instance_or_vm.startswith("OpaqueRef:") + if instance_or_vm.startswith("OpaqueRef:"): # Got passed an opaque ref; return it return instance_or_vm else: -- cgit From 3fc68b805bb5326ef4fa2b8a51a58862ec23a6a4 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 10 Feb 2011 15:04:06 -0600 Subject: Forgot the metadata includes --- nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py index 499465fce..38b711775 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License.from sqlalchemy import * +from sqlalchemy import * from migrate import * from nova import log as logging -- cgit From 68b7ae27036e1a9b16ceb835c5dc6b934e3b964a Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 10 Feb 2011 15:06:27 -0600 Subject: Forgot the metadata includes --- nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py index 38b711775..02d9177bd 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -23,6 +23,12 @@ from nova import log as logging meta = MetaData() +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
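# A minimal, self-contained illustration (plain SQLAlchemy against an
# in-memory SQLite engine; not part of the patch) of why the stub
# 'instances' table above is needed: the ForeignKey on 'migrations' can
# only be rendered when the referenced table exists in the same MetaData.
from sqlalchemy import (MetaData, Table, Column, Integer, String,
                        ForeignKey, create_engine)

meta = MetaData()
instances = Table('instances', meta,
                  Column('id', Integer, primary_key=True, nullable=False))
migrations = Table('migrations', meta,
                   Column('id', Integer, primary_key=True, nullable=False),
                   Column('instance_id', Integer,
                          ForeignKey('instances.id'), nullable=True),
                   Column('status', String(255)))

engine = create_engine('sqlite://')
# Without the stub definition of 'instances', resolving the ForeignKey
# here raises NoReferencedTableError instead of emitting the DDL.
meta.create_all(engine)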
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + # # New Tables # -- cgit From 363371ddc6bbe008a536bda06da016385850a98a Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 10 Feb 2011 17:20:10 -0600 Subject: Forgot the metadata includes --- nova/db/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/api.py b/nova/db/api.py index 5da0e9840..03232385c 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -86,7 +86,7 @@ def service_get(context, service_id): def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to""" - return IMPL.service_get(context, host, topic) + return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=False): -- cgit From 42bd44db235ed2b2fb10e05d70de8d04b0fa869d Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 11 Feb 2011 11:14:51 -0600 Subject: First, not all --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 98be39506..a6be844f8 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -164,7 +164,7 @@ def service_get_by_host_and_topic(context, host, topic): filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ - all() + first() @require_admin_context def service_get_all_by_host(context, host): -- cgit From f181051ac04084f2937438b61c988804fc2ef845 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 11 Feb 2011 17:39:04 -0600 Subject: Cast to host --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 9e8361563..63632b538 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -411,7 +411,7 @@ class ComputeManager(manager.Manager): service = self.db.service_get_by_host_and_topic(context, instance_ref['host'], FLAGS.compute_topic) topic = self.db.queue_get_for(context, FLAGS.compute_topic, - service['id']) + service['host']) rpc.cast(context, topic, { 'method': 'resize_instance', 'migration_id': migration_ref['id'], }) @@ -435,7 +435,7 @@ class ComputeManager(manager.Manager): service = self.db.service_get_by_host_and_topic(context, migration_ref['dest_host'], FLAGS.compute_topic) topic = self.db.queue_get_for(context, FLAGS.compute_topic, - service['id']) + service['host']) rpc.cast(context, topic, { 'method': 'finish_resize', 'migration_id': migration_ref['id'], }) -- cgit From 66365ece306023c1cf848d452d5af2c418e4e14c Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 11 Feb 2011 18:04:00 -0600 Subject: More typos --- nova/compute/manager.py | 12 ++++++++++-- nova/db/sqlalchemy/api.py | 4 ++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 63632b538..9aa163b73 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -414,7 +414,11 @@ class ComputeManager(manager.Manager): service['host']) rpc.cast(context, topic, { 'method': 'resize_instance', - 'migration_id': migration_ref['id'], }) + 'args': { + 'migration_id': migration_ref['id'], + 'instance_id': instance_id, + }, + }) @exception.wrap_exception @checks_instance_lock @@ -438,7 +442,11 @@ class ComputeManager(manager.Manager): service['host']) rpc.cast(context, topic, { 'method': 'finish_resize', - 'migration_id': migration_ref['id'], }) + 'args': { + 'migration_id': migration_ref['id'], + 
'instance_id': instance_id, + }, + }) @exception.wrap_exception @checks_instance_lock diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a6be844f8..af343bc56 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1941,7 +1941,7 @@ def migration_update(context, migration_id, values): @require_admin_context def migration_get(context, migration_id): session = get_session() - result = session.query(models.Migration.\ + result = session.query(models.Migration).\ filter_by(migration_id=migration_id)).first() if not result: raise exception.NotFound(_("No migration found with id %s") @@ -1952,7 +1952,7 @@ def migration_get(context, migration_id): @require_admin_context def migration_get_by_instance(context, instance_id): session = get_session() - result = session.query(models.Migration.\ + result = session.query(models.Migration).\ filter_by(instance_id=instance_id)).first() if not result: raise exception.NotFound(_("No migration found with instance id %s") -- cgit From 384a5aff50926784590ad66b92919b4d0408319d Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 11 Feb 2011 18:05:02 -0600 Subject: More typos --- nova/db/sqlalchemy/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index af343bc56..6d52790a5 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1942,7 +1942,7 @@ def migration_update(context, migration_id, values): def migration_get(context, migration_id): session = get_session() result = session.query(models.Migration).\ - filter_by(migration_id=migration_id)).first() + filter_by(migration_id=migration_id).first() if not result: raise exception.NotFound(_("No migration found with id %s") % migration_id) @@ -1953,7 +1953,7 @@ def migration_get(context, migration_id): def migration_get_by_instance(context, instance_id): session = get_session() result = session.query(models.Migration).\ - filter_by(instance_id=instance_id)).first() + filter_by(instance_id=instance_id).first() if not result: raise exception.NotFound(_("No migration found with instance id %s") % migration_id) -- cgit From f3b25fc06e3eff6f1b0e8fed4a0bf90612bf0230 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 11 Feb 2011 18:07:34 -0600 Subject: More typos --- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 9aa163b73..7c9e918fb 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -422,7 +422,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception @checks_instance_lock - def resize_instance(self, context, migration_id): + def resize_instance(self, context, instance_id, migration_id): """Starts the migration of a running instance to another host""" migration_ref = self.db.migration_get(context, migration_id) self.db.migration_update(context, migration_id, @@ -443,14 +443,14 @@ class ComputeManager(manager.Manager): rpc.cast(context, topic, { 'method': 'finish_resize', 'args': { - 'migration_id': migration_ref['id'], + 'migration_id': migration_id, 'instance_id': instance_id, }, }) @exception.wrap_exception @checks_instance_lock - def finish_resize(self, context, migration_id): + def finish_resize(self, context, instance_id, migration_id): """Completes the migration process by setting up the newly transferred disk and turning on the instance on its new host machine""" migration_ref = self.db.migration_get(context, migration_id) -- cgit From 
520b1b50bc2b1d039ad2f89d791bba21b7a35f05 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 11 Feb 2011 18:09:11 -0600 Subject: More typos --- nova/db/sqlalchemy/api.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6d52790a5..8566bb91f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1930,19 +1930,19 @@ def migration_create(context, values): @require_admin_context -def migration_update(context, migration_id, values): +def migration_update(context, id, values): session = get_session() with session.begin(): - migration = migration_get(context, migration_id, session=session) + migration = migration_get(context, id, session=session) migration.update(values) return migration @require_admin_context -def migration_get(context, migration_id): +def migration_get(context, id): session = get_session() result = session.query(models.Migration).\ - filter_by(migration_id=migration_id).first() + filter_by(id=id).first() if not result: raise exception.NotFound(_("No migration found with id %s") % migration_id) -- cgit From 252ebfe9a039fb883e3e88eda8feafae037e750e Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 11 Feb 2011 18:12:18 -0600 Subject: More typos --- nova/db/api.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/db/api.py b/nova/db/api.py index 03232385c..887f57885 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -259,6 +259,9 @@ def floating_ip_get_by_address(context, address): #################### +def migration_update(context, id, values): + """Update a migration instance""" + return IMPL.migration_update(context, id, values) def migration_create(context, values): """Create a migration record""" -- cgit From 875c4e1bab5e364a23695e46df69f1b21d9a8200 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 12:44:07 -0600 Subject: Derp --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 8566bb91f..861d13716 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1933,7 +1933,7 @@ def migration_create(context, values): def migration_update(context, id, values): session = get_session() with session.begin(): - migration = migration_get(context, id, session=session) + migration = migration_get(context, id) migration.update(values) return migration -- cgit From 4f23e417bb5ac3db8ac28dfb4b032a3e233c9821 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 12:50:54 -0600 Subject: More fixes --- nova/compute/manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7c9e918fb..3fd37d831 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -425,10 +425,11 @@ class ComputeManager(manager.Manager): def resize_instance(self, context, instance_id, migration_id): """Starts the migration of a running instance to another host""" migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get(context, instance_id) self.db.migration_update(context, migration_id, { 'status': 'migrating', }) - self.driver.migrate_disk_and_power_off(context, instance, + self.driver.migrate_disk_and_power_off(instance_ref, migration_ref['dest_host']) self.db.migration_update(context, migration_id, -- cgit From 1631196f3f277608fb0569c7242a7d8391605d0d Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 13:38:05 -0600 Subject: 
wharrgarbl --- nova/virt/xenapi/vmops.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3b14390b4..d5b2c821c 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -230,7 +230,7 @@ class VMOps(object): label = "%s-snapshot" % instance.name try: - _, template_vdi_uuids = VMHelper.create_snapshot( + vdi_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) return Snapshot(self, instance, template_vdi_uuids) except self.XenAPI.Failure, exc: @@ -262,22 +262,21 @@ class VMOps(object): # Now power down the instance and transfer the COW VHD self._shutdown(instance, method='clean') - _, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid'], 'dest_name': 'cow.vhd'} self._session.async_call_plugin('data_transfer', 'transfer_vhd', {'params': pickle.dumps(params)}) return snapshot.vdi_uuids[1], vm_vdi_rec['uuid'] - def attach_disk(self, instance): + def attach_disk(self, instance):hh vm_ref = VMHelper.lookup(self._session, instance.name) params = { 'instance_id': instance.id } self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) - - _, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) VMHelper.scan_sr(self._session, instance.id, vm_vdi_rec['SR']) def resize(self, instance, flavor): -- cgit From 9f22390532332b955cb8d78ebfd8cf9670a63ac8 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 13:50:06 -0600 Subject: Snapshot correctly --- nova/virt/xenapi/vmops.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d5b2c821c..fc5cd84e1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -211,10 +211,11 @@ class VMOps(object): def _get_snapshot(self, instance): class Snapshot(object): - def __init__(self, virt, instance, vdis): + def __init__(self, virt, instance, vm_ref, vdis): self.instance = instance self.vdi_uuids = vdis self.virt = virt + self.vm_ref = vm_ref def __enter__(self): return self @@ -230,9 +231,10 @@ class VMOps(object): label = "%s-snapshot" % instance.name try: - vdi_ref, template_vdi_uuids = VMHelper.create_snapshot( + template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) - return Snapshot(self, instance, template_vdi_uuids) + return Snapshot(self, instance, template_vm_ref, + template_vdi_uuids) except self.XenAPI.Failure, exc: logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s") % locals()) @@ -269,7 +271,7 @@ class VMOps(object): {'params': pickle.dumps(params)}) return snapshot.vdi_uuids[1], vm_vdi_rec['uuid'] - def attach_disk(self, instance):hh + def attach_disk(self, instance): vm_ref = VMHelper.lookup(self._session, instance.name) params = { 'instance_id': instance.id } -- cgit From fc8394a80b28f94561aa9ebf94c067ce2d1efd3b Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 14:25:00 -0600 Subject: Snapshot correctly --- nova/virt/xenapi/vmops.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index fc5cd84e1..a327f1d36 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -22,6 +22,7 @@ Management class for VM-related functions (spawn, 
reboot, etc). import json import M2Crypto import os +import pickle import subprocess import tempfile import uuid @@ -262,7 +263,7 @@ class VMOps(object): {'params': pickle.dumps(params)}) # Now power down the instance and transfer the COW VHD - self._shutdown(instance, method='clean') + self._shutdown(instance, snapshot.vm_ref, method='clean') vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid'], -- cgit From 411d828fc3511a09420e579ceee65a9470242509 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 14:40:58 -0600 Subject: hurr --- nova/virt/xenapi/vm_utils.py | 38 +++++++++++++++++++++----------------- nova/virt/xenapi/vmops.py | 8 +++++--- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e16662aad..2c4ae6aa2 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -238,6 +238,24 @@ class VMHelper(HelperBase): % locals()) return vdi_ref + @classmethod + def get_vdi_for_vm_safely(cls, session, vm_ref): + vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) + if vdi_refs is None: + raise Exception(_("No VDIs found for VM %s") % vm_ref) + else: + num_vdis = len(vdi_refs) + if num_vdis != 1: + raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found" + " for VM %(vm_ref)s") % locals()) + + vdi_ref = vdi_refs[0] + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + return vdi_ref, vdi_rec + + + + @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, @@ -248,7 +266,7 @@ class VMHelper(HelperBase): LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...") % locals()) - vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vm_vdi_ref, vm_vdi_rec = self.get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] sr_ref = vm_vdi_rec["SR"] @@ -256,7 +274,8 @@ class VMHelper(HelperBase): task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) template_vm_ref = session.wait_for_task(instance_id, task) - template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] + template_vdi_rec = self.get_vdi_for_vm_safely(session, + template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] LOG.debug(_('Created snapshot %(template_vm_ref)s from' @@ -568,21 +587,6 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, return parent_uuid -def get_vdi_for_vm_safely(session, vm_ref): - vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) - if vdi_refs is None: - raise Exception(_("No VDIs found for VM %s") % vm_ref) - else: - num_vdis = len(vdi_refs) - if num_vdis != 1: - raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found" - " for VM %(vm_ref)s") % locals()) - - vdi_ref = vdi_refs[0] - vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) - return vdi_ref, vdi_rec - - def find_sr(session): host = session.get_xenapi_host() srs = session.get_xenapi().SR.get_all() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a327f1d36..6a7308c74 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -263,9 +263,10 @@ class VMOps(object): {'params': pickle.dumps(params)}) # Now power down the instance and transfer the COW VHD - self._shutdown(instance, snapshot.vm_ref, method='clean') + self._shutdown(instance, vm_ref, method='clean') - vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vdi_ref, vm_vdi_rec = \ + 
VMHelper.get_vdi_for_vm_safely(session, vm_ref) params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid'], 'dest_name': 'cow.vhd'} self._session.async_call_plugin('data_transfer', 'transfer_vhd', @@ -279,7 +280,8 @@ class VMOps(object): self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) - vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vdi_ref, vm_vdi_rec = \ + VMHelper.get_vdi_for_vm_safely(session, vm_ref) VMHelper.scan_sr(self._session, instance.id, vm_vdi_rec['SR']) def resize(self, instance, flavor): -- cgit From 7bb6122549ad5ac549465f0012020f8e5dc9d506 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 15:26:08 -0600 Subject: Some refactoring --- nova/compute/manager.py | 4 ++-- nova/virt/xenapi/vm_utils.py | 12 ++++++++---- nova/virt/xenapi/vmops.py | 13 ++++++++----- nova/virt/xenapi_conn.py | 2 +- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 8 ++++++-- 5 files changed, 25 insertions(+), 14 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3fd37d831..0b0966324 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -405,7 +405,7 @@ class ComputeManager(manager.Manager): migration_ref = self.db.migration_create(context, { 'instance_id': instance_id, 'source_host': instance_ref['host'], - 'dest_host': socket.gethostbyname(socket.gethostname()), + 'dest_host': socket.gethostname(), 'status': 'pre-migrating' }) LOG.audit(_('instance %s: migrating to '), instance_id, context=context) service = self.db.service_get_by_host_and_topic(context, @@ -459,7 +459,7 @@ class ComputeManager(manager.Manager): migration_ref['instance_id']) # this may get passed into the following spawn instead - disk_info = self.driver.attach_disk(context, instance_ref) + disk_info = self.driver.attach_disk(instance_ref) self.driver.spawn(context, instance_ref, disk=disk_info) self.db.migration_update(context, migration_id, diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2c4ae6aa2..eeb5502ed 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -266,7 +266,7 @@ class VMHelper(HelperBase): LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...") % locals()) - vm_vdi_ref, vm_vdi_rec = self.get_vdi_for_vm_safely(session, vm_ref) + vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] sr_ref = vm_vdi_rec["SR"] @@ -274,7 +274,7 @@ class VMHelper(HelperBase): task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) template_vm_ref = session.wait_for_task(instance_id, task) - template_vdi_rec = self.get_vdi_for_vm_safely(session, + template_vdi_rec = cls.get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] @@ -287,6 +287,12 @@ class VMHelper(HelperBase): #TODO(sirp): we need to assert only one parent, not parents two deep return template_vm_ref, [template_vdi_uuid, parent_uuid] + @classmethod + def get_sr(cls, session, sr_label='slices'): + """ Finds the SR named by the given name label and returns + the UUID """ + return session.call_xenapi('SR.get_by_name_label', sr_label)[0] + @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and @@ -504,8 +510,6 @@ class VMHelper(HelperBase): session.wait_for_task(instance_id, task) - - def get_rrd(host, uuid): """Return the VM RRD XML as a string""" try: diff --git a/nova/virt/xenapi/vmops.py 
b/nova/virt/xenapi/vmops.py index 6a7308c74..470b6ea8c 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -266,7 +266,7 @@ class VMOps(object): self._shutdown(instance, vm_ref, method='clean') vdi_ref, vm_vdi_rec = \ - VMHelper.get_vdi_for_vm_safely(session, vm_ref) + VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid'], 'dest_name': 'cow.vhd'} self._session.async_call_plugin('data_transfer', 'transfer_vhd', @@ -277,12 +277,15 @@ class VMOps(object): vm_ref = VMHelper.lookup(self._session, instance.name) params = { 'instance_id': instance.id } - self._session.async_call_plugin('migration', 'move_vhds_into_sr', + new_base_copy_uuid, new_cow_uuid = self._session.async_call_plugin( + 'migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) - vdi_ref, vm_vdi_rec = \ - VMHelper.get_vdi_for_vm_safely(session, vm_ref) - VMHelper.scan_sr(self._session, instance.id, vm_vdi_rec['SR']) + # Now we rescan the SR so we find the VHDs + sr_ref = VMHelper.get_sr(self._session) + VMHelper.scan_sr(self._session, instance.id, sr_ref) + + return new_base_copy_uuid def resize(self, instance, flavor): """Resize a running instance by changing it's RAM and disk size """ diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 2fddb8c7f..6869ce8d8 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -199,7 +199,7 @@ class XenAPIConnection(object): def attach_disk(self, instance): """Moves the copied VDIs into the SR""" - self._vmops.attach_disk(instance) + return self._vmops.attach_disk(instance) def suspend(self, instance, callback): """suspend the specified instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 0fb7b5806..71d4473c5 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -78,9 +78,12 @@ def move_vhds_into_sr(session, args): source_base_copy_path = "%s/base_copy.vhd" % source_image_path source_cow_path = "%s/cow.vhd" % source_image_path + new_base_copy_uuid = str(uuid.uuid4()) + new_cow_uuid = str(uuid.uuid4()) + temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id) - new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, str(uuid.uuid4())) - new_cow_path = "%s/%s.vhd" % (temp_vhd_path, str(uuid.uuid4())) + new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid) + new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid) os.mkdir(temp_vhd_path) shutil.move(source_base_copy_path, new_base_copy_path) @@ -94,6 +97,7 @@ def move_vhds_into_sr(session, args): shutil.move("%s/*.vhd" % temp_vhd_path, sr_path) os.rmdir(temp_vhd_path) + return (new_base_copy_uuid, new_cow_uuid) def transfer_vhd(session, args): -- cgit From fad5baf307b74a92fd5b9d8e2d1479f558e180aa Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 15:55:52 -0600 Subject: hurr --- nova/virt/xenapi/vm_utils.py | 12 ++++++++---- nova/virt/xenapi/vmops.py | 13 ++++++++----- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 7 ++++--- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index eeb5502ed..23f9547d7 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -504,10 +504,14 @@ class VMHelper(HelperBase): return {"Unable to retrieve diagnostics": e} @classmethod - def scan_sr(cls, session, instance_id, sr_ref): - 
LOG.debug(_("Re-scanning SR %s"), sr_ref) - task = session.call_xenapi('Async.SR.scan', sr_ref) - session.wait_for_task(instance_id, task) + def scan_sr(cls, session, instance_id=None, sr_ref=None): + if sr_ref: + LOG.debug(_("Re-scanning SR %s"), sr_ref) + task = session.call_xenapi('Async.SR.scan', sr_ref) + session.wait_for_task(instance_id, task) + else: + sr_ref = cls.get_sr(session) + session.call_xen_api('SR.scan', sr_ref) def get_rrd(host, uuid): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 470b6ea8c..17d42d542 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -276,14 +276,17 @@ class VMOps(object): def attach_disk(self, instance): vm_ref = VMHelper.lookup(self._session, instance.name) - params = { 'instance_id': instance.id } - new_base_copy_uuid, new_cow_uuid = self._session.async_call_plugin( - 'migration', 'move_vhds_into_sr', + new_base_copy_uuid = str(uuid.uuid4()) + + params = { 'instance_id': instance.id, + 'new_base_copy_uuid': new_base_copy_uuid, + 'new_cow_uuid': str(uuid.uuid4() } + + self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) # Now we rescan the SR so we find the VHDs - sr_ref = VMHelper.get_sr(self._session) - VMHelper.scan_sr(self._session, instance.id, sr_ref) + VMHelper.scan_sr(self._session) return new_base_copy_uuid diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 71d4473c5..e73480445 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -69,6 +69,9 @@ def move_vhds_into_sr(session, args): params = pickle.dumps(args) instance_id = params['instance_id'] + new_base_copy_uuid = params['new_base_copy_uuid'] + new_cow_uuid = params['new_cow_uuid'] + sr_path = get_sr_path(session) sr_temp_path = "%s/images/" % sr_path @@ -78,8 +81,6 @@ def move_vhds_into_sr(session, args): source_base_copy_path = "%s/base_copy.vhd" % source_image_path source_cow_path = "%s/cow.vhd" % source_image_path - new_base_copy_uuid = str(uuid.uuid4()) - new_cow_uuid = str(uuid.uuid4()) temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id) new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid) @@ -97,7 +98,7 @@ def move_vhds_into_sr(session, args): shutil.move("%s/*.vhd" % temp_vhd_path, sr_path) os.rmdir(temp_vhd_path) - return (new_base_copy_uuid, new_cow_uuid) + return None def transfer_vhd(session, args): -- cgit From 9a71c79dc3beb554c86a1b1b5d03ab66c6e96edc Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 16:24:51 -0600 Subject: Typo fixes --- nova/compute/manager.py | 2 +- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/vmops.py | 2 +- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 3 +-- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0b0966324..23d2b80ac 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -460,7 +460,7 @@ class ComputeManager(manager.Manager): # this may get passed into the following spawn instead disk_info = self.driver.attach_disk(instance_ref) - self.driver.spawn(context, instance_ref, disk=disk_info) + self.driver.spawn(instance_ref, disk=disk_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 23f9547d7..08064b786 100644 --- a/nova/virt/xenapi/vm_utils.py +++ 
b/nova/virt/xenapi/vm_utils.py @@ -511,7 +511,7 @@ class VMHelper(HelperBase): session.wait_for_task(instance_id, task) else: sr_ref = cls.get_sr(session) - session.call_xen_api('SR.scan', sr_ref) + session.call_xenapi('SR.scan', sr_ref) def get_rrd(host, uuid): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 17d42d542..6c6d04dbf 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -280,7 +280,7 @@ class VMOps(object): params = { 'instance_id': instance.id, 'new_base_copy_uuid': new_base_copy_uuid, - 'new_cow_uuid': str(uuid.uuid4() } + 'new_cow_uuid': str(uuid.uuid4()) } self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index e73480445..cf7f378fa 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -25,7 +25,6 @@ import os.path import pickle import shutil import subprocess -import uuid import XenAPIPlugin @@ -114,7 +113,7 @@ def transfer_vhd(session, args): source_path = "%s/%s" % (sr_path, vhd_path) dest_path = '%s:%sinstance%d/%s' % (host, IMAGE_PATH, instance_id, dest_name) - rsync_args = [['nohup', RSYNC, '-av', '--progress', + rsync_args = ['nohup', RSYNC, '-av', '--progress', '-e "ssh -o StrictHostKeyChecking=no"', source_path, dest_path] if subprocess.call(rsync_args) != 0: -- cgit From 0bd48e3d53c6fce04b0c5e483537b3fd31c7364a Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 17:24:33 -0600 Subject: bad plugin --- nova/virt/xenapi/vmops.py | 2 +- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6c6d04dbf..5ab73d562 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -269,7 +269,7 @@ class VMOps(object): VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid'], 'dest_name': 'cow.vhd'} - self._session.async_call_plugin('data_transfer', 'transfer_vhd', + self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) return snapshot.vdi_uuids[1], vm_vdi_rec['uuid'] diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index cf7f378fa..c68fc93c5 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -120,4 +120,5 @@ def transfer_vhd(session, args): raise Exception("Unexpected VHD transfer failure") if __name__ == '__main__': - XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd}) + XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd, + 'move_vhd_into_sr':move_vhd_into_sr, }) -- cgit From e44a91ced3d19a3bca10457239592307bf6f829b Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 17:31:20 -0600 Subject: bad plugin --- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index c68fc93c5..c1f5b7528 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -65,7 +65,7 @@ def find_sr(session): def move_vhds_into_sr(session, args): """Moves the VHDs 
from their copied location to the SR""" - params = pickle.dumps(args) + params = pickle.loads(exists(args, 'params')) instance_id = params['instance_id'] new_base_copy_uuid = params['new_base_copy_uuid'] @@ -102,7 +102,7 @@ def move_vhds_into_sr(session, args): def transfer_vhd(session, args): """Rsyncs a VHD to an adjacent host""" - params = pickle.dumps(args) + params = pickle.loads(exists(args, 'params')) instance_id = params['instance_id'] host = params['host'] vdi_uuid = params['vdi_uuid'] @@ -121,4 +121,4 @@ def transfer_vhd(session, args): if __name__ == '__main__': XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd, - 'move_vhd_into_sr':move_vhd_into_sr, }) + 'move_vhds_into_sr':move_vhds_into_sr, }) -- cgit From b7cf8f233a585043f0aa85f4d26dc2fb5a6701c7 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 17:34:54 -0600 Subject: bad plugin --- nova/virt/xenapi/vmops.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5ab73d562..ba0db22f1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -258,7 +258,8 @@ class VMOps(object): #pretty fugly with self._get_snapshot(instance) as snapshot: params = {'host':dest, 'vdi_uuid':snapshot.vdi_uuids[1], - 'dest_name':'base_copy.vhd'} + 'dest_name': 'base_copy.vhd', + 'instance_id': instance.id, } self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) @@ -268,7 +269,8 @@ class VMOps(object): vdi_ref, vm_vdi_rec = \ VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid'], - 'dest_name': 'cow.vhd'} + 'dest_name': 'cow.vhd', + 'instance_id': instance.id, } self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) return snapshot.vdi_uuids[1], vm_vdi_rec['uuid'] -- cgit From 3014c0896202b592858fc1a7fc9c29b92a6f5d1b Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 18:04:07 -0600 Subject: plugin --- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index c1f5b7528..9c56cb379 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -111,14 +111,18 @@ def transfer_vhd(session, args): vhd_path = "%s.vhd" % vdi_uuid source_path = "%s/%s" % (sr_path, vhd_path) - dest_path = '%s:%sinstance%d/%s' % (host, IMAGE_PATH, instance_id, - dest_name) - rsync_args = ['nohup', RSYNC, '-av', '--progress', - '-e "ssh -o StrictHostKeyChecking=no"', source_path, dest_path] + dest_path = '%sinstance%d/' % (IMAGE_PATH, instance_id) + + dest_path_with_vhd="$s:%s/%s" % (host, dest_path, dest_name) + ssh_cmd = '-e "ssh -o StrictHostKeyChecking=no \'mkdir -p %s\' " ' % dest_path + + rsync_args = ['nohup', RSYNC, '-av', '--progress', ssh_cmd, source_path, + dest_path_with_vhd] if subprocess.call(rsync_args) != 0: raise Exception("Unexpected VHD transfer failure") + if __name__ == '__main__': XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd, 'move_vhds_into_sr':move_vhds_into_sr, }) -- cgit From e7fe96453760320ef897b9edfc39e057d565e6c0 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 23:22:37 -0600 Subject: Refactored --- nova/compute/manager.py | 9 ++-- nova/virt/xenapi/vmops.py | 50 ++++++++++++---------- nova/virt/xenapi_conn.py | 2 +- 
.../xenserver/xenapi/etc/xapi.d/plugins/migration | 17 ++++---- 4 files changed, 43 insertions(+), 35 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 23d2b80ac..2308c8315 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -429,14 +429,15 @@ class ComputeManager(manager.Manager): self.db.migration_update(context, migration_id, { 'status': 'migrating', }) - self.driver.migrate_disk_and_power_off(instance_ref, + disk_info = self.driver.migrate_disk_and_power_off(instance_ref, migration_ref['dest_host']) self.db.migration_update(context, migration_id, { 'status': 'post-migrating', }) + #TODO(mdietz): This is where we would update the VM record #after resizing - + service = self.db.service_get_by_host_and_topic(context, migration_ref['dest_host'], FLAGS.compute_topic) topic = self.db.queue_get_for(context, FLAGS.compute_topic, @@ -451,7 +452,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception @checks_instance_lock - def finish_resize(self, context, instance_id, migration_id): + def finish_resize(self, context, instance_id, migration_id, disk_info): """Completes the migration process by setting up the newly transferred disk and turning on the instance on its new host machine""" migration_ref = self.db.migration_get(context, migration_id) @@ -459,7 +460,7 @@ class ComputeManager(manager.Manager): migration_ref['instance_id']) # this may get passed into the following spawn instead - disk_info = self.driver.attach_disk(instance_ref) + new_disk_info = self.driver.attach_disk(instance_ref, disk_info) self.driver.spawn(instance_ref, disk=disk_info) self.db.migration_update(context, migration_id, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index ba0db22f1..127a09ad1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -103,17 +103,19 @@ class VMOps(object): vdi_ref) if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) + instance.kernel_id, user, project, + ImageType.KERNEL_RAMDISK) if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) + instance.ramdisk_id, user, project, + ImageType.KERNEL_RAMDISK) else: vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk) vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, pv_kernel) - VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=vdi_ref, - userdevice=0, bootable=True) + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, + vdi_ref=vdi_ref, userdevice=0, bootable=True) if network_ref: VMHelper.create_vif(self._session, vm_ref, @@ -234,7 +236,7 @@ class VMOps(object): try: template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) - return Snapshot(self, instance, template_vm_ref, + return Snapshot(self, instance, template_vm_ref, template_vdi_uuids) except self.XenAPI.Failure, exc: logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s") @@ -254,35 +256,40 @@ class VMOps(object): # identify it via the VBD. 
The base copy is the parent_uuid returned # from the snapshot creation - #TODO(mdietz): explicitly forcing the base_copy and cow names is - #pretty fugly + base_copy_uuid = cow_uuid = None with self._get_snapshot(instance) as snapshot: - params = {'host':dest, 'vdi_uuid':snapshot.vdi_uuids[1], - 'dest_name': 'base_copy.vhd', + # transfer the base copy + base_copy_uuid = snapshot.vdi_uuids[1] + vdi_ref, vm_vdi_rec = \ + VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) + cow_uuid = vm_vdi_rec['uuid'] + + params = {'host': dest, 'vdi_uuid': base_copy_uuid, 'instance_id': instance.id, } + self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) # Now power down the instance and transfer the COW VHD self._shutdown(instance, vm_ref, method='clean') - vdi_ref, vm_vdi_rec = \ - VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) - params = {'host':dest, 'vdi_uuid': vm_vdi_rec['uuid'], - 'dest_name': 'cow.vhd', + params = {'host': dest, 'vdi_uuid': cow_uuid, 'instance_id': instance.id, } self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) - return snapshot.vdi_uuids[1], vm_vdi_rec['uuid'] - def attach_disk(self, instance): - vm_ref = VMHelper.lookup(self._session, instance.name) + # TODO(mdietz): we could also consider renaming these to something + # sensible so we don't need to blindly pass around dictionaries + return {'base_copy': base_copy_uuid, 'cow': cow_uuid} + def attach_disk(self, instance, disk_info): + vm_ref = VMHelper.lookup(self._session, instance.name) new_base_copy_uuid = str(uuid.uuid4()) - - params = { 'instance_id': instance.id, - 'new_base_copy_uuid': new_base_copy_uuid, - 'new_cow_uuid': str(uuid.uuid4()) } + params = {'instance_id': instance.id, + 'old_base_copy_uuid': disk_info['base_copy'], + 'old_cow_uuid': disk_info['cow'], + 'new_base_copy_uuid': new_base_copy_uuid, + 'new_cow_uuid': str(uuid.uuid4())} self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) @@ -290,7 +297,7 @@ class VMOps(object): # Now we rescan the SR so we find the VHDs VMHelper.scan_sr(self._session) - return new_base_copy_uuid + return new_base_copy_uuid def resize(self, instance, flavor): """Resize a running instance by changing it's RAM and disk size """ @@ -340,7 +347,6 @@ class VMOps(object): raise RuntimeError(resp_dict['message']) return resp_dict['message'] - def _shutdown(self, instance, vm, method='hard'): """Shutdown an instance """ state = self.get_info(instance['name'])['state'] diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 6869ce8d8..21892ca37 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -197,7 +197,7 @@ class XenAPIConnection(object): off the instance copies over the COW disk""" self._vmops.migrate_disk_and_power_off(instance, dest) - def attach_disk(self, instance): + def attach_disk(self, instance, disk_info): """Moves the copied VDIs into the SR""" return self._vmops.attach_disk(instance) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 9c56cb379..3d3ad4e67 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -68,6 +68,9 @@ def move_vhds_into_sr(session, args): params = pickle.loads(exists(args, 'params')) instance_id = params['instance_id'] + old_base_copy_uuid = params['old_base_copy_uuid'] + old_cow_uuid = params['old_cow_uuid'] + 
new_base_copy_uuid = params['new_base_copy_uuid'] new_cow_uuid = params['new_cow_uuid'] @@ -77,9 +80,9 @@ def move_vhds_into_sr(session, args): # Discover the copied VHDs locally, and then set up paths to copy # them to under the SR source_image_path = "%s/instance%d" % (IMAGE_PATH, instance_id) - source_base_copy_path = "%s/base_copy.vhd" % source_image_path - source_cow_path = "%s/cow.vhd" % source_image_path - + source_base_copy_path = "%s/%s.vhd" % (source_image_path, + old_base_copy_uuid) + source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid) temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id) new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid) @@ -106,18 +109,16 @@ def transfer_vhd(session, args): instance_id = params['instance_id'] host = params['host'] vdi_uuid = params['vdi_uuid'] - dest_name = params['dest_name'] sr_path = get_sr_path(session) vhd_path = "%s.vhd" % vdi_uuid source_path = "%s/%s" % (sr_path, vhd_path) - dest_path = '%sinstance%d/' % (IMAGE_PATH, instance_id) + dest_path = '%s:%sinstance%d/' % (host, IMAGE_PATH, instance_id) - dest_path_with_vhd="$s:%s/%s" % (host, dest_path, dest_name) - ssh_cmd = '-e "ssh -o StrictHostKeyChecking=no \'mkdir -p %s\' " ' % dest_path + ssh_cmd = '-e "ssh -o StrictHostKeyChecking=no " ' rsync_args = ['nohup', RSYNC, '-av', '--progress', ssh_cmd, source_path, - dest_path_with_vhd] + dest_path] if subprocess.call(rsync_args) != 0: raise Exception("Unexpected VHD transfer failure") -- cgit From 4574bcdfe303a76a46eb7579a5a70de4e54cc926 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 14 Feb 2011 23:58:21 -0600 Subject: Tons o loggin --- nova/compute/manager.py | 1 + nova/virt/xenapi_conn.py | 4 ++-- .../xenserver/xenapi/etc/xapi.d/plugins/migration | 20 +++++++++++++++++++- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 2308c8315..6a87bb6f1 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -447,6 +447,7 @@ class ComputeManager(manager.Manager): 'args': { 'migration_id': migration_id, 'instance_id': instance_id, + 'disk_info': disk_info, }, }) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 21892ca37..6d40d4615 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -195,11 +195,11 @@ class XenAPIConnection(object): def migrate_disk_and_power_off(self, instance, dest): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk""" - self._vmops.migrate_disk_and_power_off(instance, dest) + return self._vmops.migrate_disk_and_power_off(instance, dest) def attach_disk(self, instance, disk_info): """Moves the copied VDIs into the SR""" - return self._vmops.attach_disk(instance) + return self._vmops.attach_disk(instance, disk_info) def suspend(self, instance, callback): """suspend the specified instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 3d3ad4e67..63de5bfba 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -29,6 +29,7 @@ import subprocess import XenAPIPlugin from pluginlib_nova import * +configure_logging('migration') SSH_HOSTS = '/root/.ssh/known_hosts' DEVNULL = '/dev/null' @@ -88,17 +89,27 @@ def move_vhds_into_sr(session, args): new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid) new_cow_path = "%s/%s.vhd" % 
(temp_vhd_path, new_cow_uuid) + logging.debug('Creating temporary SR path' % temp_vhd_path) os.mkdir(temp_vhd_path) + + logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path)) shutil.move(source_base_copy_path, new_base_copy_path) + + logging.debug('Moving %s into %s' % (source_cow_path, temp_vhd_path)) shutil.move(source_cow_path, new_cow_path) + logging.debug('Cleaning up %s' % source_image_path) os.rmdir(source_image_path) # Link the COW to the base copy + logging.debug('Attaching COW to the base copy...') subprocess.call([VHD_UTIL, 'modify', '-n', new_cow_path, '-p', new_base_copy_path]) + logging.debug('Moving VHDs into SR %s' % sr_path) shutil.move("%s/*.vhd" % temp_vhd_path, sr_path) + + loggin.debug('Cleaning up temporary SR path %s' % temp_vhd_path) os.rmdir(temp_vhd_path) return None @@ -115,12 +126,19 @@ def transfer_vhd(session, args): source_path = "%s/%s" % (sr_path, vhd_path) dest_path = '%s:%sinstance%d/' % (host, IMAGE_PATH, instance_id) + logging.debug("Preparing to transmit %s to %s" % (source_path, + dest_path)) + ssh_cmd = '-e "ssh -o StrictHostKeyChecking=no " ' rsync_args = ['nohup', RSYNC, '-av', '--progress', ssh_cmd, source_path, dest_path] - if subprocess.call(rsync_args) != 0: + logging.debug('rsync %s' % (' '.join(rsync_args, ))) + + rsync_proc = subprocess.POpen(rsync_args) + logging.debug('Rsync output: \n %s' % rsync_proc.communicate()[0]) + if rsync_proc.returncode != 0 raise Exception("Unexpected VHD transfer failure") -- cgit From bf82637cad867b0e8fb6ad868f60c6dcd66d7f97 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 11:05:20 -0600 Subject: Better host acquisition --- nova/compute/manager.py | 7 ++++--- nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py | 3 ++- nova/db/sqlalchemy/models.py | 3 ++- nova/virt/xenapi_conn.py | 4 ++++ plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 2 +- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6a87bb6f1..7e929d715 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -404,12 +404,13 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_create(context, { 'instance_id': instance_id, - 'source_host': instance_ref['host'], - 'dest_host': socket.gethostname(), + 'source_compute': instance_ref['host'], + 'dest_compute': socket.gethostname(), + 'dest_host': self.driver.get_host_ip_addr(), 'status': 'pre-migrating' }) LOG.audit(_('instance %s: migrating to '), instance_id, context=context) service = self.db.service_get_by_host_and_topic(context, - instance_ref['host'], FLAGS.compute_topic) + migration_ref['source_compute'], FLAGS.compute_topic) topic = self.db.queue_get_for(context, FLAGS.compute_topic, service['host']) rpc.cast(context, topic, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py index 02d9177bd..4aab5bdc6 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -39,7 +39,8 @@ migrations = Table('migrations', meta, Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True, nullable=False), - Column('source_host', String(255)), + Column('source_compute', String(255)), + Column('dest_compute', String(255)), Column('dest_host', String(255)), Column('instance_id', 
Integer, ForeignKey('instances.id'), nullable=True), diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index ebf3a382b..1c84e15dd 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -370,7 +370,8 @@ class Migration(BASE, NovaBase): """Represents a running host-to-host migration.""" __tablename__ = 'migrations' id = Column(Integer, primary_key=True, nullable=False) - source_host = Column(String(255)) + source_compute = Column(String(255)) + dest_compute = Column(String(255)) dest_host = Column(String(255)) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) status = Column(String(255)) #TODO(_cerberus_): enum diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 6d40d4615..19b5269f5 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -224,6 +224,10 @@ class XenAPIConnection(object): def get_ajax_console(self, instance): """Return link to instance's ajax console""" return self._vmops.get_ajax_console(instance) + + def get_host_ip_addr(self): + xs_url = urlparse.urlpase(FLAGS.xenapi_connection_url) + return xs_url.netloc def attach_volume(self, instance_name, device_path, mountpoint): """Attach volume storage to VM instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 63de5bfba..97c970da5 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -136,7 +136,7 @@ def transfer_vhd(session, args): logging.debug('rsync %s' % (' '.join(rsync_args, ))) - rsync_proc = subprocess.POpen(rsync_args) + rsync_proc = subprocess.Popen(rsync_args) logging.debug('Rsync output: \n %s' % rsync_proc.communicate()[0]) if rsync_proc.returncode != 0 raise Exception("Unexpected VHD transfer failure") -- cgit From 03a8d1baae00a4150a02ac2f0b04c413dd3b00e0 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 11:19:27 -0600 Subject: derp --- nova/compute/manager.py | 2 +- nova/virt/xenapi_conn.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7e929d715..546d07a09 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -440,7 +440,7 @@ class ComputeManager(manager.Manager): #after resizing service = self.db.service_get_by_host_and_topic(context, - migration_ref['dest_host'], FLAGS.compute_topic) + migration_ref['dest_compute'], FLAGS.compute_topic) topic = self.db.queue_get_for(context, FLAGS.compute_topic, service['host']) rpc.cast(context, topic, diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 19b5269f5..2671f1a7b 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -226,7 +226,7 @@ class XenAPIConnection(object): return self._vmops.get_ajax_console(instance) def get_host_ip_addr(self): - xs_url = urlparse.urlpase(FLAGS.xenapi_connection_url) + xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url) return xs_url.netloc def attach_volume(self, instance_name, device_path, mountpoint): -- cgit From c97d408842a4a5a8e9d379acc13c9c1f5871827f Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 14:10:43 -0600 Subject: Plugin changes --- nova/virt/xenapi/vmops.py | 11 +++++++---- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 12 +++++++----- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 127a09ad1..882d52f38 100644 --- 
a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -267,16 +267,18 @@ class VMOps(object): params = {'host': dest, 'vdi_uuid': base_copy_uuid, 'instance_id': instance.id, } - self._session.async_call_plugin('migration', 'transfer_vhd', + task = self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) + self._session.wait_for_task(instance.id, task) # Now power down the instance and transfer the COW VHD self._shutdown(instance, vm_ref, method='clean') params = {'host': dest, 'vdi_uuid': cow_uuid, 'instance_id': instance.id, } - self._session.async_call_plugin('migration', 'transfer_vhd', + task = self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) + self._session.wait_for_task(instance.id, task) # TODO(mdietz): we could also consider renaming these to something # sensible so we don't need to blindly pass around dictionaries @@ -291,8 +293,9 @@ class VMOps(object): 'new_base_copy_uuid': new_base_copy_uuid, 'new_cow_uuid': str(uuid.uuid4())} - self._session.async_call_plugin('migration', 'move_vhds_into_sr', - {'params': pickle.dumps(params)}) + task = self._session.async_call_plugin('migration', + 'move_vhds_into_sr', {'params': pickle.dumps(params)}) + self._session.wait_for_task(instance.id, task) # Now we rescan the SR so we find the VHDs VMHelper.scan_sr(self._session) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 97c970da5..4a4ed0e73 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -129,17 +129,19 @@ def transfer_vhd(session, args): logging.debug("Preparing to transmit %s to %s" % (source_path, dest_path)) - ssh_cmd = '-e "ssh -o StrictHostKeyChecking=no " ' + ssh_cmd = '-e \'ssh -o StrictHostKeyChecking=no\' ' - rsync_args = ['nohup', RSYNC, '-av', '--progress', ssh_cmd, source_path, - dest_path] + rsync_args = ['nohup > /root/instance%d_progress' % instance_id, RSYNC, + '-av', '--progress', ssh_cmd, source_path, dest_path] logging.debug('rsync %s' % (' '.join(rsync_args, ))) - rsync_proc = subprocess.Popen(rsync_args) + rsync_proc = subprocess.Popen(rsync_args, stdout=subprocess.PIPE) logging.debug('Rsync output: \n %s' % rsync_proc.communicate()[0]) - if rsync_proc.returncode != 0 + logging.debug('Rsync return: %d' % rsync_proc.returncode) + if rsync_proc.returncode != 0: raise Exception("Unexpected VHD transfer failure") + return "" if __name__ == '__main__': -- cgit From 00f2905a5debc5835b742dab8dce003f53e33fc2 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 14:29:31 -0600 Subject: plugin lol --- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 4a4ed0e73..bfc2a2ed4 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -89,7 +89,7 @@ def move_vhds_into_sr(session, args): new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid) new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid) - logging.debug('Creating temporary SR path' % temp_vhd_path) + logging.debug('Creating temporary SR path %s' % temp_vhd_path) os.mkdir(temp_vhd_path) logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path)) @@ -129,10 +129,10 @@ def 
transfer_vhd(session, args): logging.debug("Preparing to transmit %s to %s" % (source_path, dest_path)) - ssh_cmd = '-e \'ssh -o StrictHostKeyChecking=no\' ' + ssh_cmd = 'ssh -o StrictHostKeyChecking=no' - rsync_args = ['nohup > /root/instance%d_progress' % instance_id, RSYNC, - '-av', '--progress', ssh_cmd, source_path, dest_path] + rsync_args = ['nohup', RSYNC, '-av', '--progress', '-e', ssh_cmd, + source_path, dest_path] logging.debug('rsync %s' % (' '.join(rsync_args, ))) -- cgit From 9f77e0a46cac2ebaf9a18c4a175099b208db1adb Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 15:27:23 -0600 Subject: More plugin lol --- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index bfc2a2ed4..cc72b5d26 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -90,7 +90,7 @@ def move_vhds_into_sr(session, args): new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid) logging.debug('Creating temporary SR path %s' % temp_vhd_path) - os.mkdir(temp_vhd_path) + os.makedirs(temp_vhd_path) logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path)) shutil.move(source_base_copy_path, new_base_copy_path) @@ -107,11 +107,12 @@ def move_vhds_into_sr(session, args): new_base_copy_path]) logging.debug('Moving VHDs into SR %s' % sr_path) - shutil.move("%s/*.vhd" % temp_vhd_path, sr_path) + shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path) + shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path) - loggin.debug('Cleaning up temporary SR path %s' % temp_vhd_path) + logginig.debug('Cleaning up temporary SR path %s' % temp_vhd_path) os.rmdir(temp_vhd_path) - return None + return "" def transfer_vhd(session, args): -- cgit From f6bf7e8c1e2481e870ed4baa9f2a6aa8001b5514 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 16:46:17 -0600 Subject: fail --- nova/compute/manager.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 546d07a09..19e1d9f46 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -402,10 +402,14 @@ class ComputeManager(manager.Manager): host, possibly changing the RAM and disk size in the process""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) + if instance_ref['host'] == FLAGS.host: + raise exception.Error(_( + 'Migration error: destination same as source!')) + migration_ref = self.db.migration_create(context, { 'instance_id': instance_id, 'source_compute': instance_ref['host'], - 'dest_compute': socket.gethostname(), + 'dest_compute': FLAGS.host, 'dest_host': self.driver.get_host_ip_addr(), 'status': 'pre-migrating' }) LOG.audit(_('instance %s: migrating to '), instance_id, context=context) @@ -463,7 +467,7 @@ class ComputeManager(manager.Manager): # this may get passed into the following spawn instead new_disk_info = self.driver.attach_disk(instance_ref, disk_info) - self.driver.spawn(instance_ref, disk=disk_info) + self.driver.spawn(instance_ref, disk=new_disk_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) -- cgit From a6ea6759450aab7eb021e202c68e5301667c74a9 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 17:58:57 -0600 Subject: foo --- nova/virt/xenapi/vmops.py | 25 
+++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 882d52f38..c2f5ddc41 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -97,21 +97,22 @@ class VMOps(object): vdi_uuid = VMHelper.fetch_image(self._session, instance.id, instance.image_id, user, project, disk_image_type) vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - #Have a look at the VDI and see if it has a PV kernel - if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, instance.id, - vdi_ref) - if instance.kernel_id: - kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, - ImageType.KERNEL_RAMDISK) - if instance.ramdisk_id: - ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, - ImageType.KERNEL_RAMDISK) else: vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk) + #Have a look at the VDI and see if it has a PV kernel + if not instance.kernel_id: + pv_kernel = VMHelper.lookup_image(self._session, instance.id, + vdi_ref) + if instance.kernel_id: + kernel = VMHelper.fetch_image(self._session, instance.id, + instance.kernel_id, user, project, + ImageType.KERNEL_RAMDISK) + if instance.ramdisk_id: + ramdisk = VMHelper.fetch_image(self._session, instance.id, + instance.ramdisk_id, user, project, + ImageType.KERNEL_RAMDISK) + vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, pv_kernel) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, -- cgit From cd5aba9d1d00d9daad87efd89f78e49079bee2c7 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 18:02:57 -0600 Subject: foo --- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index cc72b5d26..5bf0fe994 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -110,7 +110,7 @@ def move_vhds_into_sr(session, args): shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path) shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path) - logginig.debug('Cleaning up temporary SR path %s' % temp_vhd_path) + logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path) os.rmdir(temp_vhd_path) return "" -- cgit From bb98e2055002ff3ed2099f60bbe4058d5f5c7b35 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 23:10:29 -0600 Subject: hurr durr --- nova/virt/xenapi/vmops.py | 5 +++-- plugins/xenserver/xenapi/etc/xapi.d/plugins/migration | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c2f5ddc41..7a176442a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -288,11 +288,12 @@ class VMOps(object): def attach_disk(self, instance, disk_info): vm_ref = VMHelper.lookup(self._session, instance.name) new_base_copy_uuid = str(uuid.uuid4()) + new_cow_uuid = str(uuid.uuid4()) params = {'instance_id': instance.id, 'old_base_copy_uuid': disk_info['base_copy'], 'old_cow_uuid': disk_info['cow'], 'new_base_copy_uuid': new_base_copy_uuid, - 'new_cow_uuid': str(uuid.uuid4())} + 'new_cow_uuid': new_cow_uuid, } task = self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) @@ -301,7 +302,7 @@ class VMOps(object): 
# Now we rescan the SR so we find the VHDs VMHelper.scan_sr(self._session) - return new_base_copy_uuid + return new_cow_uuid def resize(self, instance, flavor): """Resize a running instance by changing it's RAM and disk size """ diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 5bf0fe994..7a6eefda2 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -102,7 +102,8 @@ def move_vhds_into_sr(session, args): os.rmdir(source_image_path) # Link the COW to the base copy - logging.debug('Attaching COW to the base copy...') + logging.debug('Attaching COW to the base copy %s -> %s' % + (new_cow_path, new_base_copy_path)) subprocess.call([VHD_UTIL, 'modify', '-n', new_cow_path, '-p', new_base_copy_path]) -- cgit From 98b038c6878772f6b272cb169b1c74bd7c9838b8 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 15 Feb 2011 23:56:00 -0600 Subject: Foo --- nova/api/openstack/servers.py | 12 ++++++++++-- nova/compute/api.py | 20 +++++++++++++++----- nova/compute/manager.py | 16 +++------------- nova/db/sqlalchemy/api.py | 1 + 4 files changed, 29 insertions(+), 20 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 06a40e92c..83b421127 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -207,10 +207,18 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotImplemented()) def _action_confirm_resize(self, input_dict, req, id): - return faults.Fault(exc.HTTPNotImplemented()) + try: + self.compute_api.confirm_resize(req.environ['nova.context'], id) + except: + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPNoContent() def _action_revert_resize(self, input_dict, req, id): - return faults.Fault(exc.HTTPNotImplemented()) + try: + self.compute_api.confirm_resize(req.environ['nova.context'], id) + except: + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() def _action_rebuild(self, input_dict, req, id): return faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/compute/api.py b/nova/compute/api.py index 6b2628378..b8c4a8597 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -401,16 +401,26 @@ class API(base.Base): def revert_resize(self, context, instance_id): """Reverts a resize, deleting the 'new' instance in the process""" + context = context.elevated() instance_ref = self.db.instance_get(instance_id) - self._cast_compute_message('revert_resize', context, instance_id, - instance_ref['host']) + self._cast_compute_message('revert_resize', context, instance_id) def confirm_resize(self, context, instance_id): """Confirms a migration/resize, deleting the 'old' instance in the process.""" - migration_ref = self.db.get_migration_by_instance_id(instance_id) - self._cast_compute_message('confirm_resize', context, instance_id, - migration_ref['source_host']) + context = context.elevated() + migration_ref = self.db.migration_get_by_instance_id(context, + instance_id) + if migration_ref['status'] != 'finished': + raise exception.Error(_("Migration has incorrect status %s" % + migration_ref['status'])) + instance_ref = self.db.instance_get(context, instance_id) + + self._cast_compute_message('terminate_instance', context, instance_id, + migration_ref['source_compute']) + + self.db.instance_update(context, instance_id, + {'host': migration_ref['dest_compute'], }) def resize(self, context, instance_id, flavor): """Resize a running instance.""" 
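Note on the hunks above and below: at this point the confirm-resize path spans three layers. The servers controller catches exceptions from compute_api and maps them to HTTP faults, nova.compute.api looks up the 'finished' migration row for the instance and casts a message to the source compute host (which still holds the powered-off original VM after migrate_disk_and_power_off), and the instance record is repointed at the destination compute. The following is a minimal, self-contained sketch of that control flow for orientation only; MigrationStore and rpc_cast are illustrative stand-ins, not the real nova.db or nova.rpc call signatures.

    class MigrationStore(object):
        """Stand-in for the migrations table used by the resize flow."""

        def __init__(self):
            self._rows = []

        def create(self, **row):
            row['id'] = len(self._rows) + 1
            row.setdefault('status', 'pre-migrating')
            self._rows.append(row)
            return row

        def get_by_instance_and_status(self, instance_id, status):
            for row in self._rows:
                if row['instance_id'] == instance_id and row['status'] == status:
                    return row
            return None


    def confirm_resize(migrations, rpc_cast, instance_id):
        """Confirm a finished resize: clean up the source VM, keep the new one."""
        migration = migrations.get_by_instance_and_status(instance_id, 'finished')
        if migration is None:
            raise RuntimeError('No finished migrations found for instance')

        # The source compute host still holds the powered-off original VM;
        # tell it to destroy that VM, then mark the migration confirmed and
        # report the destination compute the instance record should point at.
        rpc_cast(migration['source_compute'],
                 {'method': 'confirm_resize',
                  'args': {'instance_id': instance_id,
                           'migration_id': migration['id']}})
        migration['status'] = 'confirmed'
        return migration['dest_compute']

revert_resize follows the same shape but casts to the destination compute first, which destroys the new VM and then casts back to the source host so the original VM can be powered on again, as the compute/manager.py changes below and in the later commits show.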
diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 19e1d9f46..169509163 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -379,12 +379,7 @@ class ComputeManager(manager.Manager): def _update_state_callback(self, context, instance_id, result): """Update instance state when async task completes.""" self._update_state(context, instance_id) - - @exception.wrap_exception - @checks_instance_lock - def confirm_resize(self, context, instance_id): - """Destroys the old instance on the source machine""" - pass + @exception.wrap_exception @checks_instance_lock @@ -413,10 +408,8 @@ class ComputeManager(manager.Manager): 'dest_host': self.driver.get_host_ip_addr(), 'status': 'pre-migrating' }) LOG.audit(_('instance %s: migrating to '), instance_id, context=context) - service = self.db.service_get_by_host_and_topic(context, - migration_ref['source_compute'], FLAGS.compute_topic) topic = self.db.queue_get_for(context, FLAGS.compute_topic, - service['host']) + instance_ref['host']) rpc.cast(context, topic, { 'method': 'resize_instance', 'args': { @@ -446,7 +439,7 @@ class ComputeManager(manager.Manager): service = self.db.service_get_by_host_and_topic(context, migration_ref['dest_compute'], FLAGS.compute_topic) topic = self.db.queue_get_for(context, FLAGS.compute_topic, - service['host']) + migration_ref['dest_compute']) rpc.cast(context, topic, { 'method': 'finish_resize', 'args': { @@ -472,9 +465,6 @@ class ComputeManager(manager.Manager): self.db.migration_update(context, migration_id, {'status': 'finished', }) - # Cleans up any transferred files and unmounts things - self.driver.cleanup_disk_transfer(context, instance_ref['id']) - @exception.wrap_exception @checks_instance_lock def pause_instance(self, context, instance_id): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 861d13716..1b6eaf138 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1935,6 +1935,7 @@ def migration_update(context, id, values): with session.begin(): migration = migration_get(context, id) migration.update(values) + migration.save() return migration -- cgit From 8e536500e83b311bf8d006ca23234c50962dc6aa Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 16 Feb 2011 00:06:29 -0600 Subject: I fail at sessions --- nova/compute/manager.py | 1 - nova/db/sqlalchemy/api.py | 9 +++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 169509163..b405e3763 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -435,7 +435,6 @@ class ComputeManager(manager.Manager): #TODO(mdietz): This is where we would update the VM record #after resizing - service = self.db.service_get_by_host_and_topic(context, migration_ref['dest_compute'], FLAGS.compute_topic) topic = self.db.queue_get_for(context, FLAGS.compute_topic, diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1b6eaf138..f96430e67 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1933,15 +1933,16 @@ def migration_create(context, values): def migration_update(context, id, values): session = get_session() with session.begin(): - migration = migration_get(context, id) + migration = migration_get(context, id, session=session) migration.update(values) - migration.save() + migration.save(session=session) return migration @require_admin_context -def migration_get(context, id): - session = get_session() +def migration_get(context, id, session=None): + if not session: + session = 
get_session() result = session.query(models.Migration).\ filter_by(id=id).first() if not result: -- cgit From c735796e0668b2bf7c45eeef6396a3fb33d22d6e Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 16 Feb 2011 00:14:26 -0600 Subject: I fail at sessions --- nova/compute/api.py | 2 +- nova/db/api.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index b8c4a8597..3fb852ab0 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -409,7 +409,7 @@ class API(base.Base): """Confirms a migration/resize, deleting the 'old' instance in the process.""" context = context.elevated() - migration_ref = self.db.migration_get_by_instance_id(context, + migration_ref = self.db.migration_get_by_instance(context, instance_id) if migration_ref['status'] != 'finished': raise exception.Error(_("Migration has incorrect status %s" % diff --git a/nova/db/api.py b/nova/db/api.py index 887f57885..9ed5efedb 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -271,9 +271,9 @@ def migration_get(context, migration_id): """Finds a migration by the id""" return IMPL.migration_get(context, migration_id) -def migration_get_by_instance_id(context, instance_id): +def migration_get_by_instance(context, instance_id): """Finds a migration by the instance id its migrating""" - return IMPL.migration_get_by_instance_id(context, instance_id) + return IMPL.migration_get_by_instance(context, instance_id) #################### -- cgit From 879845496a50477ebc2709291c159ae1e8d5aa2a Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 16 Feb 2011 13:47:14 -0600 Subject: Derp --- nova/compute/api.py | 26 +++++++++++++++++--------- nova/compute/manager.py | 32 +++++++++++++++++++++++++++++--- nova/db/sqlalchemy/api.py | 5 +++-- 3 files changed, 49 insertions(+), 14 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 3fb852ab0..58dea5db6 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -402,22 +402,30 @@ class API(base.Base): def revert_resize(self, context, instance_id): """Reverts a resize, deleting the 'new' instance in the process""" context = context.elevated() - instance_ref = self.db.instance_get(instance_id) - self._cast_compute_message('revert_resize', context, instance_id) + migration_ref = self.db.migration_get_by_instance_and_status(context, + instance_id, 'finished') + if not migration_ref: + raise exception.Error(_("No finished migrations found for + instance")) + + params = { 'migration_id': migration_ref['id']) + self._cast_compute_message('revert_resize', context, instance_id, + migration_ref['dest_compute'], params=params) def confirm_resize(self, context, instance_id): """Confirms a migration/resize, deleting the 'old' instance in the process.""" context = context.elevated() - migration_ref = self.db.migration_get_by_instance(context, - instance_id) - if migration_ref['status'] != 'finished': - raise exception.Error(_("Migration has incorrect status %s" % - migration_ref['status'])) + migration_ref = self.db.migration_get_by_instance_and_status(context, + instance_id, 'finished') + if not migration_ref: + raise exception.Error(_("No finished migrations found for + instance")) instance_ref = self.db.instance_get(context, instance_id) - self._cast_compute_message('terminate_instance', context, instance_id, - migration_ref['source_compute']) + params = { 'migration_id': migration_ref['id']) + self._cast_compute_message('confirm_resize', context, instance_id, + migration_ref['source_compute'], params=param) 
self.db.instance_update(context, instance_id, {'host': migration_ref['dest_compute'], }) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b405e3763..c05edd140 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -380,15 +380,41 @@ class ComputeManager(manager.Manager): """Update instance state when async task completes.""" self._update_state(context, instance_id) + @exception.wrap_exception + @echecks_instance_lock + def confirm_resize(self, context, instance_id, migration_id): + """ Destroys the source instance """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + migration_ref = self.db.migration_get(context, migration_id) + self.driver.destroy(instance_ref) + self.db.migration_update(context, migration_id, + { 'status': 'confirmed' }) @exception.wrap_exception @checks_instance_lock - def revert_resize(self, context, instance_id): + def revert_resize(self, context, instance_id, migration_id): """Destroys the new instance on the destination machine, reverts the model changes, and powers on the old instance on the source machine""" - pass - + instance_ref = self.db.instance_get(context, instance_id) + migration_ref = self.db.migration_get(context, migration_id) + + if migration_ref['source_compute'] == instance_ref['host']: + self.driver.power_on(instance_ref) + self.db.migration_update(context, migration_id, + { 'status': 'reverted' }) + else: + self.driver.destroy(instance_ref) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + instance_ref['host']) + rpc.cast(context, topic, + { 'method': 'resize_instance', + 'args': { + 'migration_id': migration_ref['id'], + 'instance_id': instance_id, + }, + }) @exception.wrap_exception @checks_instance_lock diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f96430e67..62484805c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1952,10 +1952,11 @@ def migration_get(context, id, session=None): @require_admin_context -def migration_get_by_instance(context, instance_id): +def migration_get_by_instance_and_status(context, instance_id, status): session = get_session() result = session.query(models.Migration).\ - filter_by(instance_id=instance_id).first() + filter_by(instance_id=instance_id). 
+ filter_by(status=status).first() if not result: raise exception.NotFound(_("No migration found with instance id %s") % migration_id) -- cgit From 905cf54f06f6dde95039599ae5ea30d2f070f398 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 16 Feb 2011 13:53:47 -0600 Subject: Typos --- nova/compute/api.py | 8 ++++---- nova/compute/manager.py | 2 +- nova/db/sqlalchemy/api.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 4262a771b..2f39b8b47 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -405,10 +405,10 @@ class API(base.Base): migration_ref = self.db.migration_get_by_instance_and_status(context, instance_id, 'finished') if not migration_ref: - raise exception.Error(_("No finished migrations found for + raise exception.Error(_("No finished migrations found for \ instance")) - params = { 'migration_id': migration_ref['id']) + params = { 'migration_id': migration_ref['id'] } self._cast_compute_message('revert_resize', context, instance_id, migration_ref['dest_compute'], params=params) @@ -419,11 +419,11 @@ class API(base.Base): migration_ref = self.db.migration_get_by_instance_and_status(context, instance_id, 'finished') if not migration_ref: - raise exception.Error(_("No finished migrations found for + raise exception.Error(_("No finished migrations found for \ instance")) instance_ref = self.db.instance_get(context, instance_id) - params = { 'migration_id': migration_ref['id']) + params = { 'migration_id': migration_ref['id'] } self._cast_compute_message('confirm_resize', context, instance_id, migration_ref['source_compute'], params=param) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d78318bec..4bab7081a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -381,7 +381,7 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception - @echecks_instance_lock + @checks_instance_lock def confirm_resize(self, context, instance_id, migration_id): """ Destroys the source instance """ context = context.elevated() diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b6fb57df7..f4dc8a630 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1961,7 +1961,7 @@ def migration_get(context, id, session=None): def migration_get_by_instance_and_status(context, instance_id, status): session = get_session() result = session.query(models.Migration).\ - filter_by(instance_id=instance_id). 
+ filter_by(instance_id=instance_id).\ filter_by(status=status).first() if not result: raise exception.NotFound(_("No migration found with instance id %s") -- cgit From 8f206774ee75c2d96c15dd2c604ae5da9601d91f Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 16 Feb 2011 17:02:57 -0600 Subject: Better exceptions --- nova/api/openstack/servers.py | 15 +++++++++------ nova/db/api.py | 2 +- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 83b421127..2fc105d07 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -209,15 +209,17 @@ class Controller(wsgi.Controller): def _action_confirm_resize(self, input_dict, req, id): try: self.compute_api.confirm_resize(req.environ['nova.context'], id) - except: - return faults.Fault(exc.HTTPBadRequest()) + except Exception, e: + LOG.exception(_("Error in confirm-resize %s"), e) + return faults.Fault(exc.HTTPBadRequest(e)) return exc.HTTPNoContent() def _action_revert_resize(self, input_dict, req, id): try: self.compute_api.confirm_resize(req.environ['nova.context'], id) - except: - return faults.Fault(exc.HTTPBadRequest()) + except Exception, e: + LOG.exception(_("Error in revert-resize %s"), e) + return faults.Fault(exc.HTTPBadRequest(e)) return exc.HTTPAccepted() def _action_rebuild(self, input_dict, req, id): @@ -229,8 +231,9 @@ class Controller(wsgi.Controller): flavor_id = input_dict['resize']['flavorId'] self.compute_api.resize(req.environ['nova.context'], id, flavor_id) - except: - return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPUnprocessableEntity(e)) return faults.Fault(exc.HTTPAccepted()) diff --git a/nova/db/api.py b/nova/db/api.py index 9ed5efedb..295d1a90a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -271,7 +271,7 @@ def migration_get(context, migration_id): """Finds a migration by the id""" return IMPL.migration_get(context, migration_id) -def migration_get_by_instance(context, instance_id): +def migration_get_by_instance_and_status(context, instance_id, status): """Finds a migration by the instance id its migrating""" return IMPL.migration_get_by_instance(context, instance_id) -- cgit From c01519112245f5e991ab438fe983bf9331d4e952 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 16 Feb 2011 17:51:43 -0600 Subject: fixed --- nova/compute/api.py | 5 ++++- nova/compute/manager.py | 2 -- nova/db/api.py | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 2f39b8b47..635632b73 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -425,7 +425,10 @@ class API(base.Base): params = { 'migration_id': migration_ref['id'] } self._cast_compute_message('confirm_resize', context, instance_id, - migration_ref['source_compute'], params=param) + migration_ref['source_compute'], params=params) + + self.db.migration_update(context, migration_id, + { 'status': 'confirmed' }) self.db.instance_update(context, instance_id, {'host': migration_ref['dest_compute'], }) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 4bab7081a..33fad50fd 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -388,8 +388,6 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) self.driver.destroy(instance_ref) - self.db.migration_update(context, migration_id, - { 
'status': 'confirmed' }) @exception.wrap_exception @checks_instance_lock diff --git a/nova/db/api.py b/nova/db/api.py index 295d1a90a..ab871c67e 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -273,7 +273,8 @@ def migration_get(context, migration_id): def migration_get_by_instance_and_status(context, instance_id, status): """Finds a migration by the instance id its migrating""" - return IMPL.migration_get_by_instance(context, instance_id) + return IMPL.migration_get_by_instance_and_status(context, instance_id, + status) #################### -- cgit From e67927c181a1f24df35a6df5663e397e260979cf Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 17 Feb 2011 13:28:39 -0600 Subject: Foo --- nova/api/openstack/servers.py | 2 +- nova/compute/manager.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 2fc105d07..5f369463d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -216,7 +216,7 @@ class Controller(wsgi.Controller): def _action_revert_resize(self, input_dict, req, id): try: - self.compute_api.confirm_resize(req.environ['nova.context'], id) + self.compute_api.revert_resize(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in revert-resize %s"), e) return faults.Fault(exc.HTTPBadRequest(e)) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 33fad50fd..4e6532ef4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -398,7 +398,8 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) - if migration_ref['source_compute'] == instance_ref['host']: + #TODO(mdietz): we may want to split these into separate methods. 
+ if migration_ref['source_compute'] == FLAGS.host: self.driver.power_on(instance_ref) self.db.migration_update(context, migration_id, { 'status': 'reverted' }) -- cgit From b1fe9a64143505235eb2e3dbe6a6c0966a85ae76 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 17 Feb 2011 14:10:08 -0600 Subject: Stop blowing away the ramdisk --- nova/compute/manager.py | 2 +- nova/virt/xenapi/vmops.py | 29 ++++++++++++++++------------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 4e6532ef4..e81ee3148 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -408,7 +408,7 @@ class ComputeManager(manager.Manager): topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) rpc.cast(context, topic, - { 'method': 'resize_instance', + { 'method': 'revert_resize', 'args': { 'migration_id': migration_ref['id'], 'instance_id': instance_id, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 13c13d0f6..17b91d27c 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -234,7 +234,8 @@ class VMOps(object): return self def __exit__(self, type, value, traceback): - self.virt._destroy(self.instance, self.vm_ref, shutdown=False) + self.virt._destroy(self.instance, self.vm_ref, shutdown=False, + destroy_kernel_ramdisk=False) #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added @@ -393,7 +394,7 @@ class VMOps(object): except self.XenAPI.Failure, exc: LOG.exception(exc) - def _destroy_vm(self, instance, vm): + def _destroy_vm(self, instance, vm, destroy_kernel_ramdisk): """Destroys a VM record """ try: kernel = None @@ -402,16 +403,18 @@ class VMOps(object): (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk( self._session, vm) task1 = self._session.call_xenapi('Async.VM.destroy', vm) - LOG.debug(_("Removing kernel/ramdisk files")) - fn = "remove_kernel_ramdisk" - args = {} - if kernel: - args['kernel-file'] = kernel - if ramdisk: - args['ramdisk-file'] = ramdisk - task2 = self._session.async_call_plugin('glance', fn, args) + if destroy_kernel_ramdisk: + LOG.debug(_("Removing kernel/ramdisk files")) + fn = "remove_kernel_ramdisk" + args = {} + if kernel: + args['kernel-file'] = kernel + if ramdisk: + args['ramdisk-file'] = ramdisk + task2 = self._session.async_call_plugin('glance', fn, args) self._session.wait_for_task(instance.id, task1) - self._session.wait_for_task(instance.id, task2) + if destroy_kernel_ramdisk: + self._session.wait_for_task(instance.id, task2) LOG.debug(_("kernel/ramdisk files removed")) except self.XenAPI.Failure, exc: LOG.exception(exc) @@ -426,7 +429,7 @@ class VMOps(object): vm = VMHelper.lookup(self._session, instance.name) return self._destroy(instance, vm, shutdown=True) - def _destroy(self, instance, vm, shutdown=True): + def _destroy(self, instance, vm, shutdown=True, destroy_kernel_ramdisk=True): """ Destroys VM instance by performing: @@ -443,7 +446,7 @@ class VMOps(object): self._shutdown(instance, vm) self._destroy_vdis(instance, vm) - self._destroy_vm(instance, vm) + self._destroy_vm(instance, vm, destroy_kernel_ramdisk) def _wait_with_callback(self, instance_id, task, callback): ret = None -- cgit From 3dd6e369c0aa2e3092eaa32a6b04cbba712ba5ad Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 17 Feb 2011 14:23:20 -0600 Subject: Move the ramdisk logging stuff --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py 
index 17b91d27c..f7e434300 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -412,10 +412,10 @@ class VMOps(object): if ramdisk: args['ramdisk-file'] = ramdisk task2 = self._session.async_call_plugin('glance', fn, args) + LOG.debug(_("kernel/ramdisk files removed")) self._session.wait_for_task(instance.id, task1) if destroy_kernel_ramdisk: self._session.wait_for_task(instance.id, task2) - LOG.debug(_("kernel/ramdisk files removed")) except self.XenAPI.Failure, exc: LOG.exception(exc) -- cgit From 3f3dddee0245cb143004dfb8c20204c511bec658 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 17 Feb 2011 16:52:31 -0600 Subject: a few changes and a bunch of unit tests --- nova/api/openstack/servers.py | 16 ++-- .../sqlalchemy/migrate_repo/versions/003_cactus.py | 60 ------------- .../versions/004_add_instance_migrations.py | 60 +++++++++++++ nova/tests/api/openstack/common.py | 30 +++++++ nova/tests/api/openstack/test_servers.py | 98 +++++++++++++++++++++- 5 files changed, 197 insertions(+), 67 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py create mode 100644 nova/tests/api/openstack/common.py diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index fd6b10d5b..a719f5e15 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -227,7 +227,7 @@ class Controller(wsgi.Controller): self.compute_api.confirm_resize(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in confirm-resize %s"), e) - return faults.Fault(exc.HTTPBadRequest(e)) + return faults.Fault(exc.HTTPBadRequest()) return exc.HTTPNoContent() def _action_revert_resize(self, input_dict, req, id): @@ -235,7 +235,7 @@ class Controller(wsgi.Controller): self.compute_api.revert_resize(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in revert-resize %s"), e) - return faults.Fault(exc.HTTPBadRequest(e)) + return faults.Fault(exc.HTTPBadRequest()) return exc.HTTPAccepted() def _action_rebuild(self, input_dict, req, id): @@ -244,12 +244,16 @@ class Controller(wsgi.Controller): def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ try: - flavor_id = input_dict['resize']['flavorId'] - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing arguments for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) except Exception, e: LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPUnprocessableEntity(e)) + return faults.Fault(exc.HTTPBadRequest()) return faults.Fault(exc.HTTPAccepted()) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py deleted file mode 100644 index 4aab5bdc6..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ /dev/null @@ -1,60 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Tables -# - -migrations = Table('migrations', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('source_compute', String(255)), - Column('dest_compute', String(255)), - Column('dest_host', String(255)), - Column('instance_id', Integer, ForeignKey('instances.id'), - nullable=True), - Column('status', String(255)) - ) - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - for table in (migrations, ): - try: - table.create() - except Exception: - logging.info(repr(table)) - logging.exception('Exception while creating table') - raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py new file mode 100644 index 000000000..4aab5bdc6 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.from sqlalchemy import * + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# + +migrations = Table('migrations', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('source_compute', String(255)), + Column('dest_compute', String(255)), + Column('dest_host', String(255)), + Column('instance_id', Integer, ForeignKey('instances.id'), + nullable=True), + Column('status', String(255)) + ) + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (migrations, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py new file mode 100644 index 000000000..b55d3087b --- /dev/null +++ b/nova/tests/api/openstack/common.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +import webob + +def webob_factory(url): + base_url = url + def web_request(url, method, body=None): + req = webob.Request.blank("%s%s" % (base_url, url)) + req.method = method + req.body = json.dumps(body) + return req + return web_request + diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a7be0796e..878b62f85 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 OpenStack LLC. +# Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -29,6 +29,7 @@ from nova.api.openstack import servers import nova.db.api from nova.db.sqlalchemy.models import Instance import nova.rpc +from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes @@ -138,6 +139,8 @@ class ServersTest(unittest.TestCase): self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api) self.allow_admin = FLAGS.allow_admin_api + self.webreq = common.webob_factor('/v1.0/servers') + def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin @@ -411,6 +414,99 @@ class ServersTest(unittest.TestCase): self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) + def test_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(resize={flavorId=3}})) + res = req.get_response(fakes.wsgi_app()) + + self.resize_called = False + def resize_mock(context, inst_id, flavor): + self.resize_called = True + + self.stubs.Set(nova.compute.api, 'resize_instance', resize_mock) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_resize_bad_param_fails(self): + req = self.webreq('/1/action', 'POST', dict('ferp')) + res = req.get_response(fakes.wsgi_app()) + + self.resize_called = False + def resize_mock(context, inst_id, flavor): + self.resize_called = True + + self.stubs.Set(nova.compute.api, 'resize_instance', resize_mock) + self.assertEqual(res.status_int, 422) + self.assertEqual(self.resize_called, False) + + def test_resize_bad_flavor_fails(self): + req = self.webreq('/1/action', 'POST', dict(resize={derp=3})) + res = req.get_response(fakes.wsgi_app()) + + self.resize_called = False + def resize_mock(context, inst_id, flavor): + self.resize_called = True + + self.stubs.Set(nova.compute.api, 'resize_instance', resize_mock) + self.assertEqual(res.status_int, 422) + self.assertEqual(self.resize_called, False) + + def test_resize_raises_fails(self): + req = self.webreq('/1/action', 'POST', dict(resize={flavorId=3})) + res = req.get_response(fakes.wsgi_app()) + + def resize_mock(context, inst_id, flavor): + raise Exception, 'hurr durr' + + self.stubs.Set(nova.compute.api, 'resize_instance', resize_mock) + self.assertEqual(res.status_int, 500) + + def test_confirm_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) + res = req.get_response(fakes.wsgi_app()) + + self.resize_called = False + def confirm_resize_mock(context, inst_id): + self.resize_called = True + + self.stubs.Set(nova.compute.api, 'confirm_resize', + confirm_resize_mock) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_confirm_resize_server_fails(self): + req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) + res = req.get_response(fakes.wsgi_app()) + + def confirm_resize_mock(context, inst_id): + raise Exception, 'hurr durr' + + self.stubs.Set(nova.compute.api, 'confirm_resize', + confirm_resize_mock) + self.assertEqual(res.status_int, 500) + + def test_revert_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(revertResize=None)) + res = req.get_response(fakes.wsgi_app()) + + self.resize_called = False + def revert_resize_mock(context, inst_id): + self.resize_called = True + + self.stubs.Set(nova.compute.api, 'revert_resize', + confirm_resize_mock) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_revert_resize_server_fails(self): + req = 
self.webreq('/1/action', 'POST', dict(confirmResize=None)) + res = req.get_response(fakes.wsgi_app()) + + def revert_resize_mock(context, inst_id): + raise Exception, 'hurr durr' + + self.stubs.Set(nova.compute.api, 'revert_resize', + confirm_resize_mock) + self.assertEqual(res.status_int, 500) if __name__ == "__main__": unittest.main() -- cgit From 88aa545b53d96c25da01218c79e8be8c1ae3370f Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Thu, 17 Feb 2011 23:55:56 +0000 Subject: Test changes --- nova/tests/api/openstack/test_servers.py | 82 +++++++++++++++----------------- 1 file changed, 39 insertions(+), 43 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 878b62f85..665551c55 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -26,6 +26,7 @@ from nova import db from nova import flags import nova.api.openstack from nova.api.openstack import servers +import nova.compute.api import nova.db.api from nova.db.sqlalchemy.models import Instance import nova.rpc @@ -139,7 +140,7 @@ class ServersTest(unittest.TestCase): self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api) self.allow_admin = FLAGS.allow_admin_api - self.webreq = common.webob_factor('/v1.0/servers') + self.webreq = common.webob_factory('/v1.0/servers') def tearDown(self): self.stubs.UnsetAll() @@ -415,98 +416,93 @@ class ServersTest(unittest.TestCase): self.assertEqual(self.server_delete_called, True) def test_resize_server(self): - req = self.webreq('/1/action', 'POST', dict(resize={flavorId=3}})) - res = req.get_response(fakes.wsgi_app()) + req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) self.resize_called = False - def resize_mock(context, inst_id, flavor): + def resize_mock(*args): self.resize_called = True - self.stubs.Set(nova.compute.api, 'resize_instance', resize_mock) + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) self.assertEqual(self.resize_called, True) - def test_resize_bad_param_fails(self): - req = self.webreq('/1/action', 'POST', dict('ferp')) - res = req.get_response(fakes.wsgi_app()) + def test_resize_bad_flavor_fails(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) self.resize_called = False - def resize_mock(context, inst_id, flavor): + def resize_mock(*args): self.resize_called = True - self.stubs.Set(nova.compute.api, 'resize_instance', resize_mock) - self.assertEqual(res.status_int, 422) - self.assertEqual(self.resize_called, False) + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) - def test_resize_bad_flavor_fails(self): - req = self.webreq('/1/action', 'POST', dict(resize={derp=3})) res = req.get_response(fakes.wsgi_app()) - - self.resize_called = False - def resize_mock(context, inst_id, flavor): - self.resize_called = True - - self.stubs.Set(nova.compute.api, 'resize_instance', resize_mock) self.assertEqual(res.status_int, 422) self.assertEqual(self.resize_called, False) def test_resize_raises_fails(self): - req = self.webreq('/1/action', 'POST', dict(resize={flavorId=3})) - res = req.get_response(fakes.wsgi_app()) + req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) - def resize_mock(context, inst_id, flavor): + def resize_mock(*args): raise Exception, 'hurr durr' - self.stubs.Set(nova.compute.api, 'resize_instance', resize_mock) - self.assertEqual(res.status_int, 500) + 
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) def test_confirm_resize_server(self): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) - res = req.get_response(fakes.wsgi_app()) self.resize_called = False - def confirm_resize_mock(context, inst_id): + def confirm_resize_mock(*args): self.resize_called = True - self.stubs.Set(nova.compute.api, 'confirm_resize', + self.stubs.Set(nova.compute.api.API, 'confirm_resize', confirm_resize_mock) - self.assertEqual(res.status_int, 202) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 204) self.assertEqual(self.resize_called, True) def test_confirm_resize_server_fails(self): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) - res = req.get_response(fakes.wsgi_app()) - def confirm_resize_mock(context, inst_id): + def confirm_resize_mock(*args): raise Exception, 'hurr durr' - self.stubs.Set(nova.compute.api, 'confirm_resize', + self.stubs.Set(nova.compute.api.API, 'confirm_resize', confirm_resize_mock) - self.assertEqual(res.status_int, 500) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) def test_revert_resize_server(self): req = self.webreq('/1/action', 'POST', dict(revertResize=None)) - res = req.get_response(fakes.wsgi_app()) self.resize_called = False - def revert_resize_mock(context, inst_id): + def revert_resize_mock(*args): self.resize_called = True - self.stubs.Set(nova.compute.api, 'revert_resize', - confirm_resize_mock) + self.stubs.Set(nova.compute.api.API, 'revert_resize', + revert_resize_mock) + + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) self.assertEqual(self.resize_called, True) def test_revert_resize_server_fails(self): - req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) - res = req.get_response(fakes.wsgi_app()) + req = self.webreq('/1/action', 'POST', dict(revertResize=None)) - def revert_resize_mock(context, inst_id): + def revert_resize_mock(*args): raise Exception, 'hurr durr' - self.stubs.Set(nova.compute.api, 'revert_resize', - confirm_resize_mock) - self.assertEqual(res.status_int, 500) + self.stubs.Set(nova.compute.api.API, 'revert_resize', + revert_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) if __name__ == "__main__": unittest.main() -- cgit From 4b51ec3e9bca7421c66816c77c43396e51e68ea6 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 17 Feb 2011 23:09:06 -0600 Subject: Tests --- nova/tests/api/openstack/common.py | 8 +++++--- nova/tests/test_compute.py | 7 +++++++ nova/virt/fake.py | 13 +++++++++++++ nova/virt/xenapi_conn.py | 8 -------- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py index b55d3087b..66207cddc 100644 --- a/nova/tests/api/openstack/common.py +++ b/nova/tests/api/openstack/common.py @@ -21,10 +21,12 @@ import webob def webob_factory(url): base_url = url - def web_request(url, method, body=None): + def web_request(url, method=None, body=None): req = webob.Request.blank("%s%s" % (base_url, url)) - req.method = method - req.body = json.dumps(body) + if method: + req.method = method + if body: + req.body = json.dumps(body) return req return web_request diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 2aa0690e7..e27e08827 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -258,3 
+258,10 @@ class ComputeTestCase(test.TestCase): self.assertEqual(ret_val, None) self.compute.terminate_instance(self.context, instance_id) + + def test_resize_instance(self): + """Ensure instance can be migrated/resized""" + instance_id = self._create_instance() + self.compute.run_instnce(self.context, instance_id) + self.compute.prep_resize(self.context, instance_id) + diff --git a/nova/virt/fake.py b/nova/virt/fake.py index ff5e22603..da86df6d4 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -176,6 +176,19 @@ class FakeConnection(object): """ pass + def migrate_disk_and_power_off(self, instance, dest): + """ + Transfers the disk of a running instance in multiple phases, turning + off the instance before the end. + """ + pass + + def attach_disk(self, instance, disk_info): + """ + Attaches the disk to an instance given the metadata disk_info + """ + pass + def pause(self, instance, callback): """ Pause the specified instance. diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index aafd836e2..be018b47f 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -184,14 +184,6 @@ class XenAPIConnection(object): """Unpause paused VM instance""" self._vmops.unpause(instance, callback) - def power_off(self, instance): - """Shuts down a running VM instance""" - self._vmops._shutdown(instance, method='clean') - - def power_on(self, instance): - """powers on a powered off VM instance""" - self._vmops.power_on(instance) - def migrate_disk_and_power_off(self, instance, dest): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk""" -- cgit From 671766cb4ada59b0e575b395b5afff82950ddb76 Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Fri, 18 Feb 2011 06:03:15 +0000 Subject: Resize compute tests --- nova/tests/test_compute.py | 24 +++++++++++++++++++++--- nova/virt/fake.py | 6 ++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index e27e08827..3f2e64c87 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -56,7 +56,7 @@ class ComputeTestCase(test.TestCase): self.manager.delete_project(self.project) super(ComputeTestCase, self).tearDown() - def _create_instance(self): + def _create_instance(self, params={}): """Create a test instance""" inst = {} inst['image_id'] = 'ami-test' @@ -67,6 +67,7 @@ class ComputeTestCase(test.TestCase): inst['instance_type'] = 'm1.tiny' inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 + inst.update(params) return db.instance_create(self.context, inst)['id'] def _create_group(self): @@ -262,6 +263,23 @@ class ComputeTestCase(test.TestCase): def test_resize_instance(self): """Ensure instance can be migrated/resized""" instance_id = self._create_instance() - self.compute.run_instnce(self.context, instance_id) - self.compute.prep_resize(self.context, instance_id) + context = self.context.elevated() + self.compute.run_instance(self.context, instance_id) + db.instance_update(self.context, instance_id, {'host':'foo'}) + + self.compute.prep_resize(context, instance_id) + migration_ref = db.migration_get_by_instance_and_status(context, + instance_id, 'pre-migrating') + self.compute.resize_instance(context, instance_id, + migration_ref['id']) + self.compute.terminate_instance(context, instance_id) + + def test_resize_same_source_fails(self): + """Ensure instance fails to migrate when source and destination are + the same host""" + instance_id = 
self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.assertRaises(exception.Error, self.compute.prep_resize, + self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index da86df6d4..9106ebf03 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -139,6 +139,12 @@ class FakeConnection(object): """ pass + def get_host_ip_addr(self): + """ + Retrieves the IP address of the dom0 + """ + pass + def resize(self, instance, flavor): """ Resizes/Migrates the specified instance. -- cgit From bb5624258200f027320327a38c524c389979c97a Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Fri, 18 Feb 2011 19:04:57 +0000 Subject: Resize compute tests --- nova/tests/test_xenapi.py | 34 ++++++++++++++++++++++++++++++++++ nova/tests/xenapi/stubs.py | 25 +++++++++++++++++++++++-- nova/virt/xenapi/fake.py | 2 +- 3 files changed, 58 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 6b8efc9d8..ee4c68e6c 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -336,3 +336,37 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() + +class XenAPIMigrateInstance(test.TestCase): + """ + Unit test for verifying migration-related actions + """ + def setUp(self): + super(XenAPIMigrateInstance, self).setUp() + self.stubs = stubout.StubOutForTesting() + FLAGS.target_host = '127.0.0.1' + FLAGS.xenapi_connection_url = 'test_url' + FLAGS.xenapi_connection_password = 'test_pass' + db_fakes.stub_out_db_instance_api(self.stubs) + stubs.stub_out_get_target(self.stubs) + xenapi_fake.reset() + self.values = {'name': 1, 'id': 1, + 'project_id': 'fake', + 'user_id': 'fake', + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + stubs.stub_out_migration_methods(self.stubs) + + def test_migrate_disk_and_power_off(self): + FLAGS.target_host = '127.0.0.1' + FLAGS.xenapi_connection_url = 'test_url' + FLAGS.xenapi_connection_password = 'test_pass' + destination = '127.0.0.1' + instance = db.instance_create(self.values) + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + conn = xenapi_conn.get_connection(False) + conn.migrate_disk_and_power_off(instance, destination) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 624995ada..d1c367475 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -20,6 +20,7 @@ from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi import vm_utils +from nova.virt.xenapi import vmops def stubout_instance_snapshot(stubs): @@ -170,8 +171,8 @@ class FakeSessionForVMTests(fake.SessionBase): def VM_destroy(self, session_ref, vm_ref): fake.destroy_vm(vm_ref) - - + + class FakeSessionForVolumeTests(fake.SessionBase): """ Stubs out a XenAPISession for Volume tests """ def __init__(self, uri): @@ -205,3 +206,23 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): def SR_forget(self, _1, ref): pass + +class FakeSessionForMigrationTests(fake.SessionBase): + """ Stubs out a XenAPISession for Migration tests """ + def __init__(self, uri): + super(FakeSessionForMigrationTests, self).__init__(uri) + + +class FakeSnapshot(vmops.VMOps): + def __getattr__(self, key): + return 'fake' + + def __exit__(self, type, value, traceback) 
+ pass + +def fake_get_snapshot(self, instance): + return FakeSnapshot() + +def stub_out_migration_methods(stubs): + stubs.Set(vmops.VMOps, '_get_snapshot', + fake_get_snapshot) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 018d0dcd3..e1ae03e70 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -401,7 +401,7 @@ class SessionBase(object): field in _db_content[cls][ref]): return _db_content[cls][ref][field] - LOG.debuug(_('Raising NotImplemented')) + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( _('xenapi.fake does not have an implementation for %s or it has ' 'been called with the wrong number of arguments') % name) -- cgit From 62b3eb71384581e900b061e65caa6418c4452fa9 Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Fri, 18 Feb 2011 21:37:57 +0000 Subject: XenAPI tests --- nova/tests/test_xenapi.py | 12 +++++++----- nova/tests/xenapi/stubs.py | 38 +++++++++++++++++++++++++++++--------- nova/virt/xenapi/fake.py | 3 +++ 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index ee4c68e6c..3cbc01e5c 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -362,11 +362,13 @@ class XenAPIMigrateInstance(test.TestCase): stubs.stub_out_migration_methods(self.stubs) def test_migrate_disk_and_power_off(self): - FLAGS.target_host = '127.0.0.1' - FLAGS.xenapi_connection_url = 'test_url' - FLAGS.xenapi_connection_password = 'test_pass' - destination = '127.0.0.1' instance = db.instance_create(self.values) stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) conn = xenapi_conn.get_connection(False) - conn.migrate_disk_and_power_off(instance, destination) + conn.migrate_disk_and_power_off(instance, '127.0.0.1') + + def test_attach_disk(self): + instance = db.instance_create(self.values) + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + conn = xenapi_conn.get_connection(False) + conn.attach_disk(instance, {'base_copy': 'hurr', 'cow': 'durr'}) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index d1c367475..054fc434b 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -213,16 +213,36 @@ class FakeSessionForMigrationTests(fake.SessionBase): super(FakeSessionForMigrationTests, self).__init__(uri) -class FakeSnapshot(vmops.VMOps): - def __getattr__(self, key): - return 'fake' +def stub_out_migration_methods(stubs): + class FakeSnapshot(object): + def __getattr__(self, key): + return str(key) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + pass + + def fake_get_snapshot(self, instance): + return FakeSnapshot() - def __exit__(self, type, value, traceback) + @classmethod + def fake_get_vdi(cls, session, vm_ref): + vdi_ref = fake.create_vdi(name_label='derp', read_only=False, + sr_ref='herp', sharable=False) + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + return vdi_ref, {'uuid': vdi_rec['uuid']} + + def fake_shutdown(self, inst, vm, method='clean'): pass -def fake_get_snapshot(self, instance): - return FakeSnapshot() + @classmethod + def fake_scan_sr(cls, session): + pass -def stub_out_migration_methods(stubs): - stubs.Set(vmops.VMOps, '_get_snapshot', - fake_get_snapshot) + stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_scan_sr) + stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot) + stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi) + stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', 
lambda x,y,z: None) + stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index e1ae03e70..ba12d4d3a 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -290,6 +290,9 @@ class SessionBase(object): #Always return 12GB available return 12 * 1024 * 1024 * 1024 + def host_call_plugin(*args): + return 'herp' + def xenapi_request(self, methodname, params): if methodname.startswith('login'): self._login(methodname, params) -- cgit From a43c5929de7ebf58eb9ecb8416ce3cf4194c176a Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 18 Feb 2011 16:13:34 -0600 Subject: Pep8 cleanup --- nova/api/openstack/servers.py | 13 ++-- nova/compute/api.py | 12 ++- nova/compute/manager.py | 85 +++++++++++----------- nova/db/api.py | 5 +- nova/db/sqlalchemy/api.py | 6 +- .../versions/004_add_instance_migrations.py | 3 +- nova/db/sqlalchemy/models.py | 5 +- nova/tests/api/openstack/common.py | 7 +- nova/tests/api/openstack/test_servers.py | 24 +++--- nova/tests/test_compute.py | 6 +- nova/tests/test_xenapi.py | 1 + nova/tests/xenapi/stubs.py | 11 +-- nova/virt/xenapi/vm_utils.py | 13 ++-- nova/virt/xenapi/vmops.py | 5 +- nova/virt/xenapi_conn.py | 6 +- 15 files changed, 104 insertions(+), 98 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index a719f5e15..f68c97323 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -209,12 +209,12 @@ class Controller(wsgi.Controller): resize a server """ actions = { - 'reboot':self._action_reboot, - 'resize':self._action_resize, - 'confirmResize':self._action_confirm_resize, - 'revertResize':self._action_revert_resize, - 'rebuild':self._action_rebuild - } + 'reboot': self._action_reboot, + 'resize': self._action_resize, + 'confirmResize': self._action_confirm_resize, + 'revertResize': self._action_revert_resize, + 'rebuild': self._action_rebuild, + } input_dict = self._deserialize(req.body, req) for key in actions.keys(): @@ -256,7 +256,6 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPBadRequest()) return faults.Fault(exc.HTTPAccepted()) - def _action_reboot(self, input_dict, req, id): try: reboot_type = input_dict['reboot']['type'] diff --git a/nova/compute/api.py b/nova/compute/api.py index 371cbae5f..2eb0e0743 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -408,7 +408,7 @@ class API(base.Base): raise exception.Error(_("No finished migrations found for \ instance")) - params = { 'migration_id': migration_ref['id'] } + params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance_id, migration_ref['dest_compute'], params=params) @@ -422,14 +422,12 @@ class API(base.Base): raise exception.Error(_("No finished migrations found for \ instance")) instance_ref = self.db.instance_get(context, instance_id) - - params = { 'migration_id': migration_ref['id'] } + params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance_id, migration_ref['source_compute'], params=params) - self.db.migration_update(context, migration_id, - { 'status': 'confirmed' }) - + self.db.migration_update(context, migration_id, + {'status': 'confirmed'}) self.db.instance_update(context, instance_id, {'host': migration_ref['dest_compute'], }) @@ -439,7 +437,7 @@ class API(base.Base): {"method": "prep_resize", "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id, }},) - + def pause(self, context, instance_id): """Pause the given 
instance.""" self._cast_compute_message('pause_instance', context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1e1c44663..3f6c359ba 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -379,7 +379,7 @@ class ComputeManager(manager.Manager): def _update_state_callback(self, context, instance_id, result): """Update instance state when async task completes.""" self._update_state(context, instance_id) - + @exception.wrap_exception @checks_instance_lock def confirm_resize(self, context, instance_id, migration_id): @@ -392,8 +392,8 @@ class ComputeManager(manager.Manager): @exception.wrap_exception @checks_instance_lock def revert_resize(self, context, instance_id, migration_id): - """Destroys the new instance on the destination machine, - reverts the model changes, and powers on the old + """Destroys the new instance on the destination machine, + reverts the model changes, and powers on the old instance on the source machine""" instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) @@ -401,24 +401,23 @@ class ComputeManager(manager.Manager): #TODO(mdietz): we may want to split these into separate methods. if migration_ref['source_compute'] == FLAGS.host: self.driver.power_on(instance_ref) - self.db.migration_update(context, migration_id, - { 'status': 'reverted' }) + self.db.migration_update(context, migration_id, + {'status': 'reverted'}) else: self.driver.destroy(instance_ref) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, + topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) - rpc.cast(context, topic, - { 'method': 'revert_resize', - 'args': { - 'migration_id': migration_ref['id'], - 'instance_id': instance_id, - }, + rpc.cast(context, topic, + {'method': 'revert_resize', + 'args': { + 'migration_id': migration_ref['id'], + 'instance_id': instance_id, }, }) @exception.wrap_exception @checks_instance_lock def prep_resize(self, context, instance_id): - """Initiates the process of moving a running instance to another + """Initiates the process of moving a running instance to another host, possibly changing the RAM and disk size in the process""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -426,21 +425,21 @@ class ComputeManager(manager.Manager): raise exception.Error(_( 'Migration error: destination same as source!')) - migration_ref = self.db.migration_create(context, - { 'instance_id': instance_id, - 'source_compute': instance_ref['host'], - 'dest_compute': FLAGS.host, - 'dest_host': self.driver.get_host_ip_addr(), - 'status': 'pre-migrating' }) - LOG.audit(_('instance %s: migrating to '), instance_id, context=context) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, + migration_ref = self.db.migration_create(context, + {'instance_id': instance_id, + 'source_compute': instance_ref['host'], + 'dest_compute': FLAGS.host, + 'dest_host': self.driver.get_host_ip_addr(), + 'status': 'pre-migrating'}) + LOG.audit(_('instance %s: migrating to '), instance_id, + context=context) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) - rpc.cast(context, topic, - { 'method': 'resize_instance', - 'args': { - 'migration_id': migration_ref['id'], - 'instance_id': instance_id, - }, + rpc.cast(context, topic, + {'method': 'resize_instance', + 'args': { + 'migration_id': migration_ref['id'], + 'instance_id': instance_id, }, }) @exception.wrap_exception @@ 
-449,28 +448,26 @@ class ComputeManager(manager.Manager): """Starts the migration of a running instance to another host""" migration_ref = self.db.migration_get(context, migration_id) instance_ref = self.db.instance_get(context, instance_id) - self.db.migration_update(context, migration_id, - { 'status': 'migrating', }) + self.db.migration_update(context, migration_id, + {'status': 'migrating', }) - disk_info = self.driver.migrate_disk_and_power_off(instance_ref, + disk_info = self.driver.migrate_disk_and_power_off(instance_ref, migration_ref['dest_host']) - - self.db.migration_update(context, migration_id, - { 'status': 'post-migrating', }) + self.db.migration_update(context, migration_id, + {'status': 'post-migrating', }) - #TODO(mdietz): This is where we would update the VM record + #TODO(mdietz): This is where we would update the VM record #after resizing service = self.db.service_get_by_host_and_topic(context, migration_ref['dest_compute'], FLAGS.compute_topic) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, + topic = self.db.queue_get_for(context, FLAGS.compute_topic, migration_ref['dest_compute']) - rpc.cast(context, topic, - { 'method': 'finish_resize', - 'args': { - 'migration_id': migration_id, - 'instance_id': instance_id, - 'disk_info': disk_info, - }, + rpc.cast(context, topic, + {'method': 'finish_resize', + 'args': { + 'migration_id': migration_id, + 'instance_id': instance_id, + 'disk_info': disk_info, }, }) @exception.wrap_exception @@ -486,8 +483,8 @@ class ComputeManager(manager.Manager): new_disk_info = self.driver.attach_disk(instance_ref, disk_info) self.driver.spawn(instance_ref, disk=new_disk_info) - self.db.migration_update(context, migration_id, - {'status': 'finished', }) + self.db.migration_update(context, migration_id, + {'status': 'finished', }) @exception.wrap_exception @checks_instance_lock diff --git a/nova/db/api.py b/nova/db/api.py index 5a9d49374..8706ef3d6 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -260,17 +260,20 @@ def floating_ip_get_by_address(context, address): #################### def migration_update(context, id, values): - """Update a migration instance""" + """Update a migration instance""" return IMPL.migration_update(context, id, values) + def migration_create(context, values): """Create a migration record""" return IMPL.migration_create(context, values) + def migration_get(context, migration_id): """Finds a migration by the id""" return IMPL.migration_get(context, migration_id) + def migration_get_by_instance_and_status(context, instance_id, status): """Finds a migration by the instance id its migrating""" return IMPL.migration_get_by_instance_and_status(context, instance_id, diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 210b53296..facb46b8b 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -156,6 +156,7 @@ def service_get_all_by_topic(context, topic): filter_by(topic=topic).\ all() + @require_admin_context def service_get_by_host_and_topic(context, host, topic): session = get_session() @@ -166,6 +167,7 @@ def service_get_by_host_and_topic(context, host, topic): filter_by(topic=topic).\ first() + @require_admin_context def service_get_all_by_host(context, host): session = get_session() @@ -1996,7 +1998,7 @@ def migration_get(context, id, session=None): result = session.query(models.Migration).\ filter_by(id=id).first() if not result: - raise exception.NotFound(_("No migration found with id %s") + raise exception.NotFound(_("No migration found with id %s") % 
migration_id) return result @@ -2008,7 +2010,7 @@ def migration_get_by_instance_and_status(context, instance_id, status): filter_by(instance_id=instance_id).\ filter_by(status=status).first() if not result: - raise exception.NotFound(_("No migration found with instance id %s") + raise exception.NotFound(_("No migration found with instance id %s") % migration_id) return result diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py index 4aab5bdc6..4fda525f1 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py @@ -44,9 +44,10 @@ migrations = Table('migrations', meta, Column('dest_host', String(255)), Column('instance_id', Integer, ForeignKey('instances.id'), nullable=True), - Column('status', String(255)) + Column('status', String(255)), ) + def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 0140fbeab..b05f134b7 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -374,7 +374,8 @@ class Migration(BASE, NovaBase): dest_compute = Column(String(255)) dest_host = Column(String(255)) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) - status = Column(String(255)) #TODO(_cerberus_): enum + #TODO(_cerberus_): enum + status = Column(String(255)) class Network(BASE, NovaBase): @@ -559,7 +560,7 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console, + Project, Certificate, ConsolePool, Console, Migration) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py index 66207cddc..3f9c7d3cf 100644 --- a/nova/tests/api/openstack/common.py +++ b/nova/tests/api/openstack/common.py @@ -19,14 +19,17 @@ import json import webob + def webob_factory(url): + """Factory for removing duplicate webob code from tests""" + base_url = url + def web_request(url, method=None, body=None): - req = webob.Request.blank("%s%s" % (base_url, url)) + req = webob.Request.blank("%s%s" % (base_url, url)) if method: req.method = method if body: req.body = json.dumps(body) return req return web_request - diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 665551c55..4eb4a3c62 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -419,8 +419,9 @@ class ServersTest(unittest.TestCase): req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) self.resize_called = False + def resize_mock(*args): - self.resize_called = True + self.resize_called = True self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) @@ -432,8 +433,9 @@ class ServersTest(unittest.TestCase): req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) self.resize_called = False + def resize_mock(*args): - self.resize_called = True + self.resize_called = True self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) @@ -445,7 +447,7 @@ class ServersTest(unittest.TestCase): req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) def 
resize_mock(*args): - raise Exception, 'hurr durr' + raise Exception('hurr durr') self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) @@ -456,10 +458,11 @@ class ServersTest(unittest.TestCase): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) self.resize_called = False + def confirm_resize_mock(*args): - self.resize_called = True + self.resize_called = True - self.stubs.Set(nova.compute.api.API, 'confirm_resize', + self.stubs.Set(nova.compute.api.API, 'confirm_resize', confirm_resize_mock) res = req.get_response(fakes.wsgi_app()) @@ -470,9 +473,9 @@ class ServersTest(unittest.TestCase): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) def confirm_resize_mock(*args): - raise Exception, 'hurr durr' + raise Exception('hurr durr') - self.stubs.Set(nova.compute.api.API, 'confirm_resize', + self.stubs.Set(nova.compute.api.API, 'confirm_resize', confirm_resize_mock) res = req.get_response(fakes.wsgi_app()) @@ -482,10 +485,11 @@ class ServersTest(unittest.TestCase): req = self.webreq('/1/action', 'POST', dict(revertResize=None)) self.resize_called = False + def revert_resize_mock(*args): self.resize_called = True - self.stubs.Set(nova.compute.api.API, 'revert_resize', + self.stubs.Set(nova.compute.api.API, 'revert_resize', revert_resize_mock) res = req.get_response(fakes.wsgi_app()) @@ -496,9 +500,9 @@ class ServersTest(unittest.TestCase): req = self.webreq('/1/action', 'POST', dict(revertResize=None)) def revert_resize_mock(*args): - raise Exception, 'hurr durr' + raise Exception('hurr durr') - self.stubs.Set(nova.compute.api.API, 'revert_resize', + self.stubs.Set(nova.compute.api.API, 'revert_resize', revert_resize_mock) res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 3f2e64c87..5fd1ddaec 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -265,8 +265,7 @@ class ComputeTestCase(test.TestCase): instance_id = self._create_instance() context = self.context.elevated() self.compute.run_instance(self.context, instance_id) - db.instance_update(self.context, instance_id, {'host':'foo'}) - + db.instance_update(self.context, instance_id, {'host': 'foo'}) self.compute.prep_resize(context, instance_id) migration_ref = db.migration_get_by_instance_and_status(context, instance_id, 'pre-migrating') @@ -279,7 +278,6 @@ class ComputeTestCase(test.TestCase): the same host""" instance_id = self._create_instance() self.compute.run_instance(self.context, instance_id) - self.assertRaises(exception.Error, self.compute.prep_resize, + self.assertRaises(exception.Error, self.compute.prep_resize, self.context, instance_id) - self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 3cbc01e5c..cb9b6620a 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -337,6 +337,7 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() + class XenAPIMigrateInstance(test.TestCase): """ Unit test for verifying migration-related actions diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 054fc434b..303c37eb9 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -171,8 +171,8 @@ class FakeSessionForVMTests(fake.SessionBase): def VM_destroy(self, session_ref, vm_ref): fake.destroy_vm(vm_ref) - - + + class FakeSessionForVolumeTests(fake.SessionBase): """ Stubs out a XenAPISession for Volume tests """ def 
__init__(self, uri): @@ -207,6 +207,7 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): def SR_forget(self, _1, ref): pass + class FakeSessionForMigrationTests(fake.SessionBase): """ Stubs out a XenAPISession for Migration tests """ def __init__(self, uri): @@ -232,8 +233,8 @@ def stub_out_migration_methods(stubs): vdi_ref = fake.create_vdi(name_label='derp', read_only=False, sr_ref='herp', sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) - return vdi_ref, {'uuid': vdi_rec['uuid']} - + return vdi_ref, {'uuid': vdi_rec['uuid'], } + def fake_shutdown(self, inst, vm, method='clean'): pass @@ -244,5 +245,5 @@ def stub_out_migration_methods(stubs): stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_scan_sr) stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot) stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi) - stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x,y,z: None) + stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None) stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 91e7339b1..436c88023 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -256,21 +256,18 @@ class VMHelper(HelperBase): else: num_vdis = len(vdi_refs) if num_vdis != 1: - raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found" + raise Exception( + _("Unexpected number of VDIs (%(num_vdis)s) found" " for VM %(vm_ref)s") % locals()) vdi_ref = vdi_refs[0] vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) return vdi_ref, vdi_rec - - - @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, - Snapshot VHD - """ + Snapshot VHD """ #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...") @@ -284,7 +281,7 @@ class VMHelper(HelperBase): task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) template_vm_ref = session.wait_for_task(instance_id, task) - template_vdi_rec = cls.get_vdi_for_vm_safely(session, + template_vdi_rec = cls.get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] @@ -299,7 +296,7 @@ class VMHelper(HelperBase): @classmethod def get_sr(cls, session, sr_label='slices'): - """ Finds the SR named by the given name label and returns + """ Finds the SR named by the given name label and returns the UUID """ return session.call_xenapi('SR.get_by_name_label', sr_label)[0] diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 552c2ddd1..d457f2e3f 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -345,7 +345,7 @@ class VMOps(object): 'new_base_copy_uuid': new_base_copy_uuid, 'new_cow_uuid': new_cow_uuid, } - task = self._session.async_call_plugin('migration', + task = self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) self._session.wait_for_task(instance.id, task) @@ -469,7 +469,8 @@ class VMOps(object): vm = VMHelper.lookup(self._session, instance.name) return self._destroy(instance, vm, shutdown=True) - def _destroy(self, instance, vm, shutdown=True, destroy_kernel_ramdisk=True): + def _destroy(self, instance, vm, shutdown=True, + destroy_kernel_ramdisk=True): """ Destroys VM instance by performing: diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index be018b47f..e1c5dcc7c 100644 --- 
a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -192,7 +192,7 @@ class XenAPIConnection(object): def attach_disk(self, instance, disk_info): """Moves the copied VDIs into the SR""" return self._vmops.attach_disk(instance, disk_info) - + def suspend(self, instance, callback): """suspend the specified instance""" self._vmops.suspend(instance, callback) @@ -220,9 +220,9 @@ class XenAPIConnection(object): def get_ajax_console(self, instance): """Return link to instance's ajax console""" return self._vmops.get_ajax_console(instance) - + def get_host_ip_addr(self): - xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url) + xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url) return xs_url.netloc def attach_volume(self, instance_name, device_path, mountpoint): -- cgit From f4289df0e58080d6d9fa381915bbd0d29f3b9751 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 15:05:48 -0800 Subject: We're not using prefix matching on AMQP, so fakerabbit shouldn't be doing it! --- nova/fakerabbit.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index dd82a9366..a7dee8caf 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -48,7 +48,6 @@ class Exchange(object): nm = self.name LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)' ' %(message)s') % locals()) - routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: LOG.debug(_('Publishing to route %s'), f) -- cgit From e8a3f461319c1ee20a18f3a375af5e1e958af05e Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 15:50:42 -0800 Subject: Missing import for nova.exceptions (!) --- nova/network/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/network/api.py b/nova/network/api.py index bf43acb51..4ee1148cb 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -21,6 +21,7 @@ Handles all requests relating to instances (guest vms). """ from nova import db +from nova import exception from nova import flags from nova import log as logging from nova import quota -- cgit From ab6b11b0399655ccdd9619be00470eda464cf2a7 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 16:09:41 -0800 Subject: Don't blindly concatenate queue name if second portion is None --- nova/db/sqlalchemy/api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2697fac73..009ed1f06 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1209,7 +1209,9 @@ def project_get_network_v6(context, project_id): def queue_get_for(_context, topic, physical_node_id): # FIXME(ja): this should be servername?
- return "%s.%s" % (topic, physical_node_id) + if physical_node_id: + return "%s.%s" % (topic, physical_node_id) + return topic ################### -- cgit From b6254db80ca9841361775a92b85f88db7251f857 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 17:45:38 -0800 Subject: Refactoring nova-api to be a service, so that we can reuse it in tests --- bin/nova-api | 50 ++++---------------------------- nova/apiservice.py | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+), 45 deletions(-) create mode 100644 nova/apiservice.py diff --git a/bin/nova-api b/bin/nova-api index d5efb4687..99417e6c6 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -36,57 +36,17 @@ gettext.install('nova', unicode=1) from nova import flags from nova import log as logging -from nova import version +from nova import apiservice +from nova import utils from nova import wsgi -LOG = logging.getLogger('nova.api') - FLAGS = flags.FLAGS -flags.DEFINE_string('ec2_listen', "0.0.0.0", - 'IP address for EC2 API to listen') -flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') -flags.DEFINE_string('osapi_listen', "0.0.0.0", - 'IP address for OpenStack API to listen') -flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') - -API_ENDPOINTS = ['ec2', 'osapi'] - - -def run_app(paste_config_file): - LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file) - apps = [] - for api in API_ENDPOINTS: - config = wsgi.load_paste_configuration(paste_config_file, api) - if config is None: - LOG.debug(_("No paste configuration for app: %s"), api) - continue - LOG.debug(_("App Config: %(api)s\n%(config)r") % locals()) - LOG.info(_("Running %s API"), api) - app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_listen_port" % api), - getattr(FLAGS, "%s_listen" % api))) - if len(apps) == 0: - LOG.error(_("No known API applications configured in %s."), - paste_config_file) - return - - server = wsgi.Server() - for app in apps: - server.start(*app) - server.wait() - if __name__ == '__main__': FLAGS(sys.argv) logging.setup() - LOG.audit(_("Starting nova-api node (version %s)"), - version.version_string_with_vcs()) - LOG.debug(_("Full set of FLAGS:")) - for flag in FLAGS: - flag_get = FLAGS.get(flag, None) - LOG.debug("%(flag)s : %(flag_get)s" % locals()) conf = wsgi.paste_config_file('nova-api.conf') - if conf: - run_app(conf) - else: + if not conf: LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf') + else: + apiservice.serve(conf) diff --git a/nova/apiservice.py b/nova/apiservice.py new file mode 100644 index 000000000..7b453e19f --- /dev/null +++ b/nova/apiservice.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Wrapper for API service, makes it look more like the non-WSGI services +""" + +from nova import flags +from nova import log as logging +from nova import version +from nova import wsgi + + +LOG = logging.getLogger('nova.api') + +FLAGS = flags.FLAGS +flags.DEFINE_string('ec2_listen', "0.0.0.0", + 'IP address for EC2 API to listen') +flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') +flags.DEFINE_string('osapi_listen', "0.0.0.0", + 'IP address for OpenStack API to listen') +flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') + +API_ENDPOINTS = ['ec2', 'osapi'] + + +def _run_app(paste_config_file): + LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file) + apps = [] + for api in API_ENDPOINTS: + config = wsgi.load_paste_configuration(paste_config_file, api) + if config is None: + LOG.debug(_("No paste configuration for app: %s"), api) + continue + LOG.debug(_("App Config: %(api)s\n%(config)r") % locals()) + LOG.info(_("Running %s API"), api) + app = wsgi.load_paste_app(paste_config_file, api) + apps.append((app, getattr(FLAGS, "%s_listen_port" % api), + getattr(FLAGS, "%s_listen" % api))) + if len(apps) == 0: + LOG.error(_("No known API applications configured in %s."), + paste_config_file) + return + + server = wsgi.Server() + for app in apps: + server.start(*app) + server.wait() + + +class ApiService(object): + """Base class for workers that run on hosts.""" + + def __init__(self, conf): + self.conf = conf + + def start(self): + _run_app(self.conf) + + +def serve(conf): + LOG.audit(_("Starting nova-api node (version %s)"), + version.version_string_with_vcs()) + LOG.debug(_("Full set of FLAGS:")) + for flag in FLAGS: + flag_get = FLAGS.get(flag, None) + LOG.debug("%(flag)s : %(flag_get)s" % locals()) + + service = ApiService(conf) + service.start() -- cgit From 79f6c437b486262bab3faacb59197a5cae30b2bd Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 17:50:26 -0800 Subject: Added create static method to ApiService --- nova/apiservice.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nova/apiservice.py b/nova/apiservice.py index 7b453e19f..1914b9e59 100644 --- a/nova/apiservice.py +++ b/nova/apiservice.py @@ -72,6 +72,11 @@ class ApiService(object): def start(self): _run_app(self.conf) + @staticmethod + def create(): + conf = wsgi.paste_config_file('nova-api.conf') + return serve(conf) + def serve(conf): LOG.audit(_("Starting nova-api node (version %s)"), @@ -83,3 +88,5 @@ def serve(conf): service = ApiService(conf) service.start() + + return service -- cgit From e37e7b91a9fb5664ad50c1ff38e95f1a2d655c06 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 17:58:01 -0800 Subject: Support service-like wait behaviour for API service --- bin/nova-api | 3 ++- nova/apiservice.py | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 99417e6c6..ccb7701ae 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -49,4 +49,5 @@ if __name__ == '__main__': if not conf: LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf') else: - apiservice.serve(conf) + service = apiservice.serve(conf) + service.wait() diff --git a/nova/apiservice.py b/nova/apiservice.py index 1914b9e59..14239f196 100644 --- a/nova/apiservice.py +++ b/nova/apiservice.py @@ -60,7 +60,7 @@ def _run_app(paste_config_file): server = wsgi.Server() for app in apps: server.start(*app) - server.wait() + return server class ApiService(object): @@ -68,9 +68,13 
@@ class ApiService(object): def __init__(self, conf): self.conf = conf + self.wsgi_app = None def start(self): - _run_app(self.conf) + self.wsgi_app = _run_app(self.conf) + + def wait(self): + self.wsgi_app.wait() @staticmethod def create(): -- cgit From 7c8c325f475926724f3243344803841e24d5cb84 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 18:15:29 -0800 Subject: Make static create method behave more like other services --- nova/apiservice.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/apiservice.py b/nova/apiservice.py index 14239f196..693bc9a63 100644 --- a/nova/apiservice.py +++ b/nova/apiservice.py @@ -79,7 +79,10 @@ class ApiService(object): @staticmethod def create(): conf = wsgi.paste_config_file('nova-api.conf') - return serve(conf) + LOG.audit(_("Starting nova-api node (version %s)"), + version.version_string_with_vcs()) + service = ApiService(conf) + return service def serve(conf): -- cgit From dd6b9c21d3ad493051f25ce632fb327ed7fc7b73 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 18:57:04 -0800 Subject: Exit with exit code 1 if conf cannot be read --- bin/nova-api | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/nova-api b/bin/nova-api index ccb7701ae..d03be85e3 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -48,6 +48,7 @@ if __name__ == '__main__': conf = wsgi.paste_config_file('nova-api.conf') if not conf: LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf') + sys.exit(1) else: service = apiservice.serve(conf) service.wait() -- cgit From 50e71cef14c3bd079fbc2d2c203b0e0f76ee869e Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 18:59:23 -0800 Subject: Removed unused import & formatting cleanups --- bin/nova-api | 1 - nova/apiservice.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index d03be85e3..96c784624 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -37,7 +37,6 @@ gettext.install('nova', unicode=1) from nova import flags from nova import log as logging from nova import apiservice -from nova import utils from nova import wsgi FLAGS = flags.FLAGS diff --git a/nova/apiservice.py b/nova/apiservice.py index 693bc9a63..6340e9b9b 100644 --- a/nova/apiservice.py +++ b/nova/apiservice.py @@ -16,9 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-""" -Wrapper for API service, makes it look more like the non-WSGI services -""" +"""Wrapper for API service, makes it look more like the non-WSGI services""" from nova import flags from nova import log as logging @@ -28,6 +26,7 @@ from nova import wsgi LOG = logging.getLogger('nova.api') + FLAGS = flags.FLAGS flags.DEFINE_string('ec2_listen', "0.0.0.0", 'IP address for EC2 API to listen') @@ -36,6 +35,7 @@ flags.DEFINE_string('osapi_listen', "0.0.0.0", 'IP address for OpenStack API to listen') flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') + API_ENDPOINTS = ['ec2', 'osapi'] -- cgit From fbfc2b21657a2878ab97138c133a253f7c88303e Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 15:17:32 -0800 Subject: Alphabetize imports --- bin/nova-api | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-api b/bin/nova-api index 96c784624..933202dc8 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -34,9 +34,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import apiservice from nova import flags from nova import log as logging -from nova import apiservice from nova import wsgi FLAGS = flags.FLAGS -- cgit From 861a7f2b53f02af2ef196411171182394edd7e17 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 15:31:40 -0800 Subject: Changed create from a @staticmethod to a @classmethod --- nova/apiservice.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/apiservice.py b/nova/apiservice.py index 6340e9b9b..03aa781fb 100644 --- a/nova/apiservice.py +++ b/nova/apiservice.py @@ -76,12 +76,12 @@ class ApiService(object): def wait(self): self.wsgi_app.wait() - @staticmethod - def create(): + @classmethod + def create(cls): conf = wsgi.paste_config_file('nova-api.conf') LOG.audit(_("Starting nova-api node (version %s)"), version.version_string_with_vcs()) - service = ApiService(conf) + service = cls(conf) return service -- cgit From 503fe37427247b2728051426d3c40266de69bd71 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 15:59:54 -0800 Subject: Reverted bad-fix to sqlalchemy code --- nova/db/sqlalchemy/api.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 53498fbc5..d8751bef4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1209,9 +1209,7 @@ def project_get_network_v6(context, project_id): def queue_get_for(_context, topic, physical_node_id): # FIXME(ja): this should be servername? 
- if physical_node_id: - return "%s.%s" % (topic, physical_node_id) - return topic + return "%s.%s" % (topic, physical_node_id) ################### -- cgit From 4229990fa77d6edb73b88e92750a8779c478e40c Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Sat, 26 Feb 2011 19:09:57 +0100 Subject: replaced ConnectionFailed with Exception in tools/euca-get-ajax-console was not working for me with euca2tools 1.2 (version 2007-10-10, release 31337) --- tools/euca-get-ajax-console | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/euca-get-ajax-console b/tools/euca-get-ajax-console index 37060e74f..e407dd566 100755 --- a/tools/euca-get-ajax-console +++ b/tools/euca-get-ajax-console @@ -35,7 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): import boto import nova from boto.ec2.connection import EC2Connection -from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed +from euca2ools import Euca2ool, InstanceValidationError, Util usage_string = """ Retrieves a url to an ajax console terminal @@ -147,7 +147,7 @@ def main(): try: euca_conn = euca.make_connection() - except ConnectionFailed, e: + except Exception, e: print e.message sys.exit(1) try: -- cgit From 38c21546ecc079300c575e5950bcb990eecee3a3 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sun, 27 Feb 2011 20:28:04 -0500 Subject: execute: shell=True removed. --- nova/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index 0cf91e0cc..40a8d8d8c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -125,7 +125,7 @@ def fetchfile(url, target): # c.perform() # c.close() # fp.close() - execute("curl --fail %s -o %s" % (url, target)) + execute("curl","--fail",url,"-o",target) def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): @@ -133,7 +133,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): env = os.environ.copy() if addl_env: env.update(addl_env) - obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, + obj = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) result = None if process_input != None: @@ -254,7 +254,7 @@ def last_octet(address): def get_my_linklocal(interface): try: - if_str = execute("ip -f inet6 -o addr show %s" % interface) + if_str = execute("ip","-f","inet6","-o","addr","show", interface) condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link" links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] -- cgit From 4f90783224025618661bf8814e016843ec237875 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sun, 27 Feb 2011 20:52:32 -0500 Subject: execvp --- nova/volume/driver.py | 69 ++++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index e3744c790..e73202b73 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -97,22 +97,21 @@ class VolumeDriver(object): sizestr = '100M' else: sizestr = '%sG' % volume['size'] - self._try_execute("sudo lvcreate -L %s -n %s %s" % - (sizestr, + self._try_execute('sudo','lvcreate','-L',sizestr,'-n', volume['name'], - FLAGS.volume_group)) + FLAGS.volume_group) def delete_volume(self, volume): """Deletes a logical volume.""" try: - self._try_execute("sudo lvdisplay %s/%s" % + self._try_execute('sudo','lvdisplay','%s/%s" % (FLAGS.volume_group, volume['name'])) except 
Exception as e: # If the volume isn't present, then don't attempt to delete return True - self._try_execute("sudo lvremove -f %s/%s" % + self._try_execute('sudo','lvremove','-f',"%s/%s" % (FLAGS.volume_group, volume['name'])) @@ -168,12 +167,13 @@ class AOEDriver(VolumeDriver): blade_id) = self.db.volume_allocate_shelf_and_blade(context, volume['id']) self._try_execute( - "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (shelf_id, + 'sudo','vblade-persist','setup', + shelf_id, blade_id, FLAGS.aoe_eth_dev, - FLAGS.volume_group, - volume['name'])) + "/dev/%s/%s" % + (FLAGS.volume_group, + volume['name'])) # NOTE(vish): The standard _try_execute does not work here # because these methods throw errors if other # volumes on this host are in the process of @@ -182,9 +182,9 @@ class AOEDriver(VolumeDriver): # just wait a bit for the current volume to # be ready and ignore any errors. time.sleep(2) - self._execute("sudo vblade-persist auto all", + self._execute('sudo','vblade-persist','auto','all', check_exit_code=False) - self._execute("sudo vblade-persist start all", + self._execute('sudo','vblade-persist','start','all', check_exit_code=False) def remove_export(self, context, volume): @@ -192,15 +192,15 @@ class AOEDriver(VolumeDriver): (shelf_id, blade_id) = self.db.volume_get_shelf_and_blade(context, volume['id']) - self._try_execute("sudo vblade-persist stop %s %s" % - (shelf_id, blade_id)) - self._try_execute("sudo vblade-persist destroy %s %s" % - (shelf_id, blade_id)) + self._try_execute('sudo','vblade-persist','stop', + shelf_id, blade_id) + self._try_execute('sudo','vblade-persist','destroy', + shelf_id, blade_id) def discover_volume(self, _volume): """Discover volume on a remote host.""" - self._execute("sudo aoe-discover") - self._execute("sudo aoe-stat", check_exit_code=False) + self._execute('sudo','aoe-discover') + self._execute('sudo','aoe-stat', check_exit_code=False) def undiscover_volume(self, _volume): """Undiscover volume on a remote host.""" @@ -252,13 +252,16 @@ class ISCSIDriver(VolumeDriver): iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) - self._sync_exec("sudo ietadm --op new " - "--tid=%s --params Name=%s" % - (iscsi_target, iscsi_name), + self._sync_exec('sudo','ietadm','--op','new', + "--tid=%s" % iscsi_target, + '--params', + "Name=%s" % iscsi_name, check_exit_code=False) - self._sync_exec("sudo ietadm --op new --tid=%s " - "--lun=0 --params Path=%s,Type=fileio" % - (iscsi_target, volume_path), + self._sync_exec('sudo','ietadm','--op','new', + "--tid=%s" % iscsi_target, + '--lun=0', + '--params', + "Path=%s,Type=fileio" % volume_path, check_exit_code=False) def _ensure_iscsi_targets(self, context, host): @@ -490,16 +493,13 @@ class RBDDriver(VolumeDriver): size = 100 else: size = int(volume['size']) * 1024 - self._try_execute("rbd --pool %s --size %d create %s" % - (FLAGS.rbd_pool, - size, - volume['name'])) + self._try_execute('rbd','--pool',FLAGS.rbd_pool, + '--size', size,'create', volume['name']) def delete_volume(self, volume): """Deletes a logical volume.""" - self._try_execute("rbd --pool %s rm %s" % - (FLAGS.rbd_pool, - volume['name'])) + self._try_execute('rbd','--pool',FLAGS.rbd_pool, + 'rm', volume['name']) def local_path(self, volume): """Returns the path of the rbd volume.""" @@ -534,7 +534,7 @@ class SheepdogDriver(VolumeDriver): def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" try: - (out, err) = self._execute("collie cluster
info") + (out, err) = self._execute('collie','cluster','info') if not out.startswith('running'): raise exception.Error(_("Sheepdog is not working: %s") % out) except exception.ProcessExecutionError: @@ -546,12 +546,13 @@ class SheepdogDriver(VolumeDriver): sizestr = '100M' else: sizestr = '%sG' % volume['size'] - self._try_execute("qemu-img create sheepdog:%s %s" % - (volume['name'], sizestr)) + self._try_execute('qemu-img','create', + "sheepdog:%s" %s" % volume['name'], + sizestr) def delete_volume(self, volume): """Deletes a logical volume""" - self._try_execute("collie vdi delete %s" % volume['name']) + self._try_execute('collie','vdi','delete',volume['name']) def local_path(self, volume): return "sheepdog:%s" % volume['name'] -- cgit From 953efce36b74c18a32ef9c42e6b1a57190e3ff6e Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sun, 27 Feb 2011 20:53:53 -0500 Subject: execvp --- nova/crypto.py | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/nova/crypto.py b/nova/crypto.py index a34b940f5..b240a3958 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -105,8 +105,8 @@ def generate_key_pair(bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.join(tmpdir, 'temp') - utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile)) - (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile)) + utils.execute('ssh-keygen','-q','-b',"%d" % bits,'-N','""','-f',keyfile) + (out, err) = utils.execute('ssh-keygen','-q','-l','-f',"%s.pub" % (keyfile)) fingerprint = out.split(' ')[1] private_key = open(keyfile).read() public_key = open(keyfile + '.pub').read() @@ -118,7 +118,7 @@ def generate_key_pair(bits=1024): # bio = M2Crypto.BIO.MemoryBuffer() # key.save_pub_key_bio(bio) # public_key = bio.read() - # public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key) + # public_key, err = execute('ssh-keygen','-y','-f','/dev/stdin', private_key) return (private_key, public_key, fingerprint) @@ -143,8 +143,8 @@ def revoke_cert(project_id, file_name): start = os.getcwd() os.chdir(ca_folder(project_id)) # NOTE(vish): potential race condition here - utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name) - utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" % + utils.execute('openssl','ca','-config','./openssl.cnf','-revoke',"'%s'" % file_name) + utils.execute('openssl','ca','-gencrl','-config','./openssl.cnf','-out',"'%s'" % FLAGS.crl_file) os.chdir(start) @@ -193,9 +193,8 @@ def generate_x509_cert(user_id, project_id, bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.join(tmpdir, 'temp.csr') - utils.execute("openssl genrsa -out %s %s" % (keyfile, bits)) - utils.execute("openssl req -new -key %s -out %s -batch -subj %s" % - (keyfile, csrfile, subject)) + utils.execute('openssl','genrsa','-out',keyfile,bits) + utils.execute('openssl','req','-new','-key',keyfile,'-out',csrfile,'-batch','-subj',subject) private_key = open(keyfile).read() csr = open(csrfile).read() shutil.rmtree(tmpdir) @@ -212,8 +211,7 @@ def _ensure_project_folder(project_id): if not os.path.exists(ca_path(project_id)): start = os.getcwd() os.chdir(ca_folder()) - utils.execute("sh geninter.sh %s %s" % - (project_id, _project_cert_subject(project_id))) + utils.execute('sh','geninter.sh',project_id, _project_cert_subject(project_id)) os.chdir(start) @@ -228,8 +226,8 @@ def generate_vpn_files(project_id): start = os.getcwd() os.chdir(ca_folder()) # TODO(vish): the shell 
scripts could all be done in python - utils.execute("sh genvpn.sh %s %s" % - (project_id, _vpn_cert_subject(project_id))) + utils.execute('sh','genvpn.sh', + project_id, _vpn_cert_subject(project_id)) with open(csr_fn, "r") as csrfile: csr_text = csrfile.read() (serial, signed_csr) = sign_csr(csr_text, project_id) @@ -259,9 +257,9 @@ def _sign_csr(csr_text, ca_folder): start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) - utils.execute("openssl ca -batch -out %s -config " - "./openssl.cnf -infiles %s" % (outbound, inbound)) - out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound) + utils.execute('openssl','ca','-batch','-out',outbound,'-config' + './openssl.cnf','-infiles',inbound) + out, _err = utils.execute('openssl','x509','-in',outbound','-serial','-noout') serial = out.rpartition("=")[2] os.chdir(start) with open(outbound, "r") as crtfile: -- cgit From 90abcdc7ae9e3f855dadb1ccc88892a2cc7bab05 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sun, 27 Feb 2011 20:57:13 -0500 Subject: execvp --- nova/console/xvp.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/console/xvp.py b/nova/console/xvp.py index cd257e0a6..271dffa54 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -133,10 +133,10 @@ class XVPConsoleProxy(object): return logging.debug(_("Starting xvp")) try: - utils.execute('xvp -p %s -c %s -l %s' % - (FLAGS.console_xvp_pid, - FLAGS.console_xvp_conf, - FLAGS.console_xvp_log)) + utils.execute('xvp', + '-p',FLAGS.console_xvp_pid, + '-c',FLAGS.console_xvp_conf, + '-l',FLAGS.console_xvp_log) except exception.ProcessExecutionError, err: logging.error(_("Error starting xvp: %s") % err) @@ -190,5 +190,5 @@ class XVPConsoleProxy(object): flag = '-x' #xvp will blow up on passwords that are too long (mdragon) password = password[:maxlen] - out, err = utils.execute('xvp %s' % flag, process_input=password) + out, err = utils.execute('xvp', flag, process_input=password) return out.strip() -- cgit From 8b3e9ad11c2f5c425701f1eb4abb7b3f577ae1cc Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 28 Feb 2011 12:37:02 +0100 Subject: Add utils.synchronized decorator to allow for synchronising method entrance across multiple workers on the same host. --- nova/tests/test_misc.py | 37 ++++++++++++++++++++++++++++++++++++- nova/utils.py | 11 +++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index e6da6112a..154b6fae6 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -14,10 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. 
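
The several "execvp" patches above all make the same conversion: command lines that used to be built as single shell strings ("sudo lvcreate -L %s ..." % ...) are now passed to nova's execute() helper as separate arguments, and execute() itself (patched two commits earlier) no longer uses shell=True. A minimal standalone sketch of the pattern follows; the run() helper is illustrative only, not nova's actual code.

import subprocess

def run(*cmd, **kwargs):
    """Run a command given as separate argv elements, with no shell."""
    check_exit_code = kwargs.pop('check_exit_code', True)
    # Popen receives a sequence, so nothing is re-parsed by /bin/sh:
    # spaces or metacharacters inside a value stay part of that single
    # argument.
    obj = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = obj.communicate()
    if check_exit_code and obj.returncode != 0:
        raise RuntimeError('%r failed (%s): %s' % (cmd, obj.returncode, err))
    return out, err

# The old form, "curl --fail %s -o %s" % (url, target) with shell=True,
# hands the interpolated string to /bin/sh, so a value such as
# "http://x; rm -rf ~" runs as two commands.  As an argv element it is
# only ever one literal (and invalid) URL argument.
out, err = run('ls', '-ld', '/tmp')
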
+from datetime import datetime +import errno import os +import select +import time from nova import test -from nova.utils import parse_mailmap, str_dict_replace +from nova.utils import parse_mailmap, str_dict_replace, synchronized class ProjectTestCase(test.TestCase): @@ -55,3 +59,34 @@ class ProjectTestCase(test.TestCase): '%r not listed in Authors' % missing) finally: tree.unlock() + + +class LockTestCase(test.TestCase): + def test_synchronized(self): + rpipe, wpipe = os.pipe() + pid = os.fork() + if pid > 0: + os.close(wpipe) + + @synchronized('testlock') + def f(): + rfds, _, __ = select.select([rpipe], [], [], 1) + self.assertEquals(len(rfds), 0, "The other process, which was" + " supposed to be locked, " + "wrote on its end of the " + "pipe") + os.close(rpipe) + + f() + else: + os.close(rpipe) + + @synchronized('testlock') + def g(): + try: + os.write(wpipe, "foo") + except OSError, e: + self.assertEquals(e.errno, errno.EPIPE) + return + g() + os._exit(0) diff --git a/nova/utils.py b/nova/utils.py index 0cf91e0cc..cb1ea5a7d 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -25,6 +25,7 @@ import base64 import datetime import inspect import json +import lockfile import os import random import socket @@ -491,6 +492,16 @@ def loads(s): return json.loads(s) +def synchronized(name): + def wrap(f): + def inner(*args, **kwargs): + lock = lockfile.FileLock('nova-%s.lock' % name) + with lock: + return f(*args, **kwargs) + return inner + return wrap + + def ensure_b64_encoding(val): """Safety method to ensure that values expected to be base64-encoded actually are. If they are, the value is returned unchanged. Otherwise, -- cgit From 7a6daa8d92f4f11fe2fce8fb2f4b11d96cb98c2d Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Mon, 28 Feb 2011 17:27:19 +0000 Subject: Suppress stack traces unless --verbose is specified --- nova/log.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/log.py b/nova/log.py index 87a21ddb4..d194ab8f0 100644 --- a/nova/log.py +++ b/nova/log.py @@ -266,7 +266,10 @@ class NovaRootLogger(NovaLogger): def handle_exception(type, value, tb): - logging.root.critical(str(value), exc_info=(type, value, tb)) + extra = {} + if FLAGS.verbose: + extra['exc_info'] = (type, value, tb) + logging.root.critical(str(value), **extra) def reset(): -- cgit From c1bcf1dead8734a02172b4ac20b24fbbb7dbb993 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 28 Feb 2011 11:40:22 -0600 Subject: Rename migration to coincide with latest trunk changes --- .../versions/004_add_instance_migrations.py | 61 ---------------------- .../versions/007_add_instance_migrations.py | 61 ++++++++++++++++++++++ 2 files changed, 61 insertions(+), 61 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/007_add_instance_migrations.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py deleted file mode 100644 index 4fda525f1..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_migrations.py +++ /dev/null @@ -1,61 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
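
The synchronized decorator introduced above relies on the lockfile package: each decorated call takes a file lock named after the decorator argument, so two workers on the same host serialize even though they run in separate processes. A small self-contained usage sketch, assuming lockfile is installed; the lock name and function below are made up, and the decorator only gains functools.wraps and a configurable lock directory in later patches in this series.

import lockfile

def synchronized(name):
    def wrap(f):
        def inner(*args, **kwargs):
            # Same shape as the decorator above: one well-known lock file
            # per logical lock name, held for the duration of the call.
            lock = lockfile.FileLock('nova-%s.lock' % name)
            with lock:
                return f(*args, **kwargs)
        return inner
    return wrap

@synchronized('testlock')
def critical_section(value):
    # A second process calling critical_section() blocks here until the
    # first one releases nova-testlock.lock.
    return value * 2

print(critical_section(21))
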
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Tables -# - -migrations = Table('migrations', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('source_compute', String(255)), - Column('dest_compute', String(255)), - Column('dest_host', String(255)), - Column('instance_id', Integer, ForeignKey('instances.id'), - nullable=True), - Column('status', String(255)), - ) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - for table in (migrations, ): - try: - table.create() - except Exception: - logging.info(repr(table)) - logging.exception('Exception while creating table') - raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_instance_migrations.py new file mode 100644 index 000000000..4fda525f1 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_instance_migrations.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.from sqlalchemy import * + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
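
Both the deleted 004 script and its re-added copy below define only upgrade(). For reference, a matching downgrade() for a script of this shape would look roughly like the following; it is a hypothetical sketch reusing the module-level meta, migrations, and logging names, not something present in the patch.

def downgrade(migrate_engine):
    # Mirror image of upgrade(): bind the engine and drop the table this
    # migration created, so sqlalchemy-migrate can step the schema back.
    meta.bind = migrate_engine
    for table in (migrations, ):
        try:
            table.drop()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while dropping table')
            raise
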
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# + +migrations = Table('migrations', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('source_compute', String(255)), + Column('dest_compute', String(255)), + Column('dest_host', String(255)), + Column('instance_id', Integer, ForeignKey('instances.id'), + nullable=True), + Column('status', String(255)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (migrations, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise -- cgit From 8da6796789767b1341cb5a650066b67ad3191c74 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 28 Feb 2011 12:30:02 -0600 Subject: Merge review fixes --- nova/virt/xenapi/vm_utils.py | 17 ++++-- nova/virt/xenapi/vmops.py | 27 ++++++---- .../xenserver/xenapi/etc/xapi.d/plugins/migration | 62 +++++----------------- 3 files changed, 45 insertions(+), 61 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e7ad1f686..870660dea 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -300,6 +300,13 @@ class VMHelper(HelperBase): the UUID """ return session.call_xenapi('SR.get_by_name_label', sr_label)[0] + @classmethod + def get_sr_path(cls, session, sr_label='slices'): + """ Finds the SR and then coerces it into a path on the dom0 file + system """ + # TODO(mdietz): replace this with the flag once unified-images merges + return '/var/run/sr-mount/%s' % cls.get_sr(session, sr_label) + @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and @@ -508,13 +515,17 @@ class VMHelper(HelperBase): @classmethod def scan_sr(cls, session, instance_id=None, sr_ref=None): + """Scans the SR specified by sr_ref""" if sr_ref: LOG.debug(_("Re-scanning SR %s"), sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) session.wait_for_task(instance_id, task) - else: - sr_ref = cls.get_sr(session) - session.call_xenapi('SR.scan', sr_ref) + + @classmethod + def scan_default_sr(cls, session): + """Looks for the system default SR and triggers a re-scan""" + sr_ref = cls.get_sr(session) + session.call_xenapi('SR.scan', sr_ref) def get_rrd(host, uuid): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index f5278ff07..b3e5627d8 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -298,8 +298,10 @@ class VMOps(object): VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) cow_uuid = vm_vdi_rec['uuid'] - params = {'host': dest, 'vdi_uuid': base_copy_uuid, - 'instance_id': instance.id, } + params = {'host': dest, + 'vdi_uuid': base_copy_uuid, + 'instance_id': instance.id, + 'sr_path': VMHelper.get_sr_path(self._session), } task = self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) @@ -308,8 +310,11 @@ class VMOps(object): # Now power down the instance and transfer the COW VHD self._shutdown(instance, vm_ref, method='clean') - params = {'host': dest, 'vdi_uuid': cow_uuid, - 'instance_id': 
instance.id, } + params = {'host': dest, + 'vdi_uuid': cow_uuid, + 'instance_id': instance.id, + 'sr_path': VMHelper.get_sr_path(self._session), } + task = self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) self._session.wait_for_task(instance.id, task) @@ -326,14 +331,15 @@ class VMOps(object): 'old_base_copy_uuid': disk_info['base_copy'], 'old_cow_uuid': disk_info['cow'], 'new_base_copy_uuid': new_base_copy_uuid, - 'new_cow_uuid': new_cow_uuid, } + 'new_cow_uuid': new_cow_uuid, + 'sr_path': VMHelper.get_sr_path(self._session), } task = self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) self._session.wait_for_task(instance.id, task) # Now we rescan the SR so we find the VHDs - VMHelper.scan_sr(self._session) + VMHelper.scan_default_sr(self._session) return new_cow_uuid @@ -411,7 +417,7 @@ class VMOps(object): raise RuntimeError(resp_dict['message']) return resp_dict['message'] - def _shutdown(self, instance, vm, method='hard'): + def _shutdown(self, instance, vm, hard=True): """Shutdown an instance """ state = self.get_info(instance['name'])['state'] if state == power_state.SHUTDOWN: @@ -421,10 +427,11 @@ class VMOps(object): try: task = None - if method == 'clean': - task = self._session.call_xenapi('Async.VM.clean_shutdown', vm) - else: + if hard: task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) + else: + task = self._session.call_xenapi('Async.VM.clean_shutdown', vm) + self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: LOG.exception(exc) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 7a6eefda2..4aa89863a 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -1,7 +1,6 @@ #!/usr/bin/env python -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. # All Rights Reserved. 
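
The resize path above ships its arguments to the dom0 plugin as a single pickled dictionary ({'params': pickle.dumps(params)}). The values below are invented, and the real plugin reads the blob out of its XenAPI argument dictionary rather than a local variable, but the round trip itself is just this:

import pickle

# Compute-host side: one pickled blob per plugin call.
params = {'host': '10.0.0.2',
          'vdi_uuid': 'a1b2c3d4',
          'instance_id': 42,
          'sr_path': '/var/run/sr-mount/0b4a-11e0'}
args = {'params': pickle.dumps(params)}

# Plugin (dom0) side: unpickle and read the individual fields, much as
# move_vhds_into_sr and transfer_vhd do.
received = pickle.loads(args['params'])
assert received['sr_path'] == '/var/run/sr-mount/0b4a-11e0'
assert received['instance_id'] == 42
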
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -31,38 +30,6 @@ import XenAPIPlugin from pluginlib_nova import * configure_logging('migration') -SSH_HOSTS = '/root/.ssh/known_hosts' -DEVNULL = '/dev/null' -KEYSCAN = '/usr/bin/ssh-keyscan' -RSYNC = '/usr/bin/rsync' -FILE_SR_PATH = '/var/run/sr-mount' -IMAGE_PATH = '/images/' -VHD_UTIL = '/usr/sbin/vhd-util' - -def get_sr_path(session): - sr_ref = find_sr(session) - - if sr_ref is None: - raise Exception('Cannot find SR to read VDI from') - - sr_rec = session.xenapi.SR.get_record(sr_ref) - sr_uuid = sr_rec["uuid"] - sr_path = os.path.join(FILE_SR_PATH, sr_uuid) - return sr_path - -def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() - for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) - if not ('i18n-key' in sr_rec['other_config'] and - sr_rec['other_config']['i18n-key'] == 'local-storage'): - continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) - if pbd_rec['host'] == host: - return sr - return None def move_vhds_into_sr(session, args): """Moves the VHDs from their copied location to the SR""" @@ -75,13 +42,13 @@ def move_vhds_into_sr(session, args): new_base_copy_uuid = params['new_base_copy_uuid'] new_cow_uuid = params['new_cow_uuid'] - sr_path = get_sr_path(session) + sr_path = params['sr_path'] sr_temp_path = "%s/images/" % sr_path - # Discover the copied VHDs locally, and then set up paths to copy + # Discover the copied VHDs locally, and then set up paths to copy # them to under the SR - source_image_path = "%s/instance%d" % (IMAGE_PATH, instance_id) - source_base_copy_path = "%s/%s.vhd" % (source_image_path, + source_image_path = "%s/instance%d" % ('/images/', instance_id) + source_base_copy_path = "%s/%s.vhd" % (source_image_path, old_base_copy_uuid) source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid) @@ -102,11 +69,10 @@ def move_vhds_into_sr(session, args): os.rmdir(source_image_path) # Link the COW to the base copy - logging.debug('Attaching COW to the base copy %s -> %s' % + logging.debug('Attaching COW to the base copy %s -> %s' % (new_cow_path, new_base_copy_path)) - subprocess.call([VHD_UTIL, 'modify', '-n', new_cow_path, '-p', - new_base_copy_path]) - + subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' % + (new_cow_path, new_base_copy_path))) logging.debug('Moving VHDs into SR %s' % sr_path) shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path) shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path) @@ -122,19 +88,19 @@ def transfer_vhd(session, args): instance_id = params['instance_id'] host = params['host'] vdi_uuid = params['vdi_uuid'] - sr_path = get_sr_path(session) + sr_path = params['sr_path'] vhd_path = "%s.vhd" % vdi_uuid source_path = "%s/%s" % (sr_path, vhd_path) - dest_path = '%s:%sinstance%d/' % (host, IMAGE_PATH, instance_id) + dest_path = '%s:%sinstance%d/' % (host, '/images/', instance_id) - logging.debug("Preparing to transmit %s to %s" % (source_path, + logging.debug("Preparing to transmit %s to %s" % (source_path, dest_path)) ssh_cmd = 'ssh -o StrictHostKeyChecking=no' - rsync_args = ['nohup', RSYNC, '-av', '--progress', '-e', ssh_cmd, - source_path, dest_path] + rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s' + % (ssh_cmd, source_path, dest_path)) logging.debug('rsync %s' % (' '.join(rsync_args, ))) @@ -148,4 +114,4 @@ def transfer_vhd(session, args): if __name__ == '__main__': 
XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd, - 'move_vhds_into_sr':move_vhds_into_sr, }) + 'move_vhds_into_sr': move_vhds_into_sr, }) -- cgit From d5736e925f288462f6325130be0af49f0ace5884 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 28 Feb 2011 23:31:09 +0100 Subject: Add a lock_path flag for lock files. --- nova/flags.py | 2 ++ nova/utils.py | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 8cf199b2f..213d4d4e1 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -321,6 +321,8 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), "Top-level directory for maintaining nova's state") +DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'), + "Directory for lock files") DEFINE_string('logdir', None, 'output to a per-service log file in named ' 'directory') diff --git a/nova/utils.py b/nova/utils.py index cb1ea5a7d..48f12350f 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -44,11 +44,13 @@ from eventlet.green import subprocess from nova import exception from nova.exception import ProcessExecutionError +from nova import flags from nova import log as logging LOG = logging.getLogger("nova.utils") TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +FLAGS = flags.FLAGS def import_class(import_str): @@ -495,7 +497,8 @@ def loads(s): def synchronized(name): def wrap(f): def inner(*args, **kwargs): - lock = lockfile.FileLock('nova-%s.lock' % name) + lock = lockfile.FileLock(os.path.join(FLAGS.lock_path, + 'nova-%s.lock' % name)) with lock: return f(*args, **kwargs) return inner -- cgit From be9004ffa4c70358c8edda1f33ffe7ba7e1ae1ee Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 1 Mar 2011 20:49:46 +0100 Subject: Use functools.wraps to make sure wrapped method's metadata (docstring and name) doesn't get mangled. --- nova/tests/test_misc.py | 12 ++++++++++-- nova/utils.py | 6 ++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index 154b6fae6..9f572b58e 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -14,11 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. -from datetime import datetime import errno import os import select -import time from nova import test from nova.utils import parse_mailmap, str_dict_replace, synchronized @@ -62,6 +60,16 @@ class ProjectTestCase(test.TestCase): class LockTestCase(test.TestCase): + def test_synchronized_wrapped_function_metadata(self): + @synchronized('whatever') + def foo(): + """Bar""" + pass + self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring " + "got lost") + self.assertEquals(foo.__name__, 'foo', "Wrapped function's name " + "got mangled") + def test_synchronized(self): rpipe, wpipe = os.pipe() pid = os.fork() diff --git a/nova/utils.py b/nova/utils.py index 48f12350f..9929e6fef 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -23,11 +23,14 @@ System-level utilities and helper functions. 
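
transfer_vhd in the migration plugin above builds its rsync invocation by formatting one flat string and handing it to shlex.split, so the plugin still avoids shell=True while keeping the command readable. A standalone illustration with made-up paths:

import shlex

ssh_cmd = 'ssh -o StrictHostKeyChecking=no'
source_path = '/var/run/sr-mount/0b4a-11e0/a1b2c3d4.vhd'
dest_path = '10.0.0.2:/images/instance42/'

rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s'
                         % (ssh_cmd, source_path, dest_path))
# shlex.split produces an argv list suitable for subprocess without a
# shell.  It would also split a path containing whitespace, which is why
# the straight argument-list style used elsewhere in this series is the
# more robust of the two forms.
print(rsync_args[:4])
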
import base64 import datetime +import functools import inspect import json import lockfile +import netaddr import os import random +import re import socket import string import struct @@ -35,8 +38,6 @@ import sys import time import types from xml.sax import saxutils -import re -import netaddr from eventlet import event from eventlet import greenthread @@ -496,6 +497,7 @@ def loads(s): def synchronized(name): def wrap(f): + @functools.wraps(f) def inner(*args, **kwargs): lock = lockfile.FileLock(os.path.join(FLAGS.lock_path, 'nova-%s.lock' % name)) -- cgit From 58ac632f8e08b248d234deffdb56fe3a33a25130 Mon Sep 17 00:00:00 2001 From: Masanori Itoh Date: Thu, 3 Mar 2011 00:12:48 +0900 Subject: Port Todd's lp720157 fix to the current trunk, rev 752. --- nova/api/ec2/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 5adc2c075..5a63dc8da 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -198,6 +198,12 @@ class Requestify(wsgi.Middleware): try: # Raise KeyError if omitted action = req.params['Action'] + # Fix bug lp:720157 for older (version 1) clients + version = req.params['SignatureVersion'] + if int(version) == 1: + non_args.remove('SignatureMethod') + if 'SignatureMethod' in args: + args.pop('SignatureMethod') for non_arg in non_args: # Remove, but raise KeyError if omitted args.pop(non_arg) -- cgit From f617fc087367a3d65bd4b826bf735f65fec9d2fd Mon Sep 17 00:00:00 2001 From: Masanori Itoh Date: Thu, 3 Mar 2011 00:28:04 +0900 Subject: Change DescribeKeyPairs response tag from keypairsSet to keySet, and fix lp720133. --- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 844ccbe5e..c6309f03c 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -298,7 +298,7 @@ class CloudController(object): 'keyFingerprint': key_pair['fingerprint'], }) - return {'keypairsSet': result} + return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) -- cgit From 953fe68ce9b27322003200c464c121464761d1e2 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 2 Mar 2011 15:46:50 -0600 Subject: merge fixes --- nova/compute/api.py | 4 +- nova/virt/xenapi/vm_utils.py | 4 +- nova/virt/xenapi/vmops.py | 48 ++++++++++------------ plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 2 +- 4 files changed, 27 insertions(+), 31 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 52d30c7f7..fb7ef1aed 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -430,7 +430,7 @@ class API(base.Base): migration_ref = self.db.migration_get_by_instance_and_status(context, instance_id, 'finished') if not migration_ref: - raise exception.Error(_("No finished migrations found for \ + raise exception.NotFound(_("No finished migrations found for \ instance")) params = {'migration_id': migration_ref['id']} @@ -444,7 +444,7 @@ class API(base.Base): migration_ref = self.db.migration_get_by_instance_and_status(context, instance_id, 'finished') if not migration_ref: - raise exception.Error(_("No finished migrations found for \ + raise exception.NotFound(_("No finished migrations found for \ instance")) instance_ref = self.db.instance_get(context, instance_id) params = {'migration_id': migration_ref['id']} diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index eea9bb0b9..3ee963fa7 100644 --- a/nova/virt/xenapi/vm_utils.py 
+++ b/nova/virt/xenapi/vm_utils.py @@ -310,8 +310,7 @@ class VMHelper(HelperBase): def get_sr_path(cls, session, sr_label='slices'): """ Finds the SR and then coerces it into a path on the dom0 file system """ - # TODO(mdietz): replace this with the flag once unified-images merges - return '/var/run/sr-mount/%s' % cls.get_sr(session, sr_label) + return FLAGS.xenapi_sr_base_path + cls.get_sr(session, sr_label) @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_id): @@ -649,6 +648,7 @@ class VMHelper(HelperBase): @classmethod def scan_default_sr(cls, session): """Looks for the system default SR and triggers a re-scan""" + #FIXME(sirp/mdietz): refactor scan_default_sr in there sr_ref = cls.get_sr(session) session.call_xenapi('SR.scan', sr_ref) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5157f18f1..b54084842 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -105,7 +105,7 @@ class VMOps(object): vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk) if disk_image_type == ImageType.DISK_RAW: - #Have a look at the VDI and see if it has a PV kernel + # Have a look at the VDI and see if it has a PV kernel pv_kernel = VMHelper.lookup_image(self._session, instance.id, vdi_ref) elif disk_image_type == ImageType.DISK_VHD: @@ -113,7 +113,6 @@ class VMOps(object): # configurable as Windows will use HVM. pv_kernel = True - #Have a look at the VDI and see if it has a PV kernel if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) @@ -240,29 +239,20 @@ class VMOps(object): that will bundle the VHDs together and then push the bundle into Glance. """ - - with self._get_snapshot(instance) as snapshot: + template_vm_ref = None + try: + template_vm_ref, template_vdi_uuids = self._get_snapshot(instance) # call plugin to ship snapshot off to glance VMHelper.upload_image( - self._session, instance.id, snapshot.vdi_uuids, image_id) + self._session, instance.id, template_vdi_uuids, image_id) + finally: + if template_vm_ref: + self.virt._destroy(self.instance, template_vm_ref, shutdown=False, + destroy_kernel_ramdisk=False) logging.debug(_("Finished snapshot and upload for VM %s"), instance) def _get_snapshot(self, instance): - class Snapshot(object): - def __init__(self, virt, instance, vm_ref, vdis): - self.instance = instance - self.vdi_uuids = vdis - self.virt = virt - self.vm_ref = vm_ref - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.virt._destroy(self.instance, self.vm_ref, shutdown=False, - destroy_kernel_ramdisk=False) - #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added @@ -273,8 +263,7 @@ class VMOps(object): try: template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) - return Snapshot(self, instance, template_vm_ref, - template_vdi_uuids) + return template_vm_ref, template_vdi_uuids except self.XenAPI.Failure, exc: logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s") % locals()) @@ -294,9 +283,11 @@ class VMOps(object): # from the snapshot creation base_copy_uuid = cow_uuid = None - with self._get_snapshot(instance) as snapshot: + template_vdi_uuids = template_vm_ref = None + try: # transfer the base copy - base_copy_uuid = snapshot.vdi_uuids[1] + template_vm_ref, template_vdi_uuids = self._get_snapshot(instance) + base_copy_uuid = template_vdi_uuids[1] vdi_ref, vm_vdi_rec = \ 
VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) cow_uuid = vm_vdi_rec['uuid'] @@ -304,7 +295,7 @@ class VMOps(object): params = {'host': dest, 'vdi_uuid': base_copy_uuid, 'instance_id': instance.id, - 'sr_path': VMHelper.get_sr_path(self._session), } + 'sr_path': VMHelper.get_sr_path(self._session)} task = self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) @@ -322,6 +313,11 @@ class VMOps(object): {'params': pickle.dumps(params)}) self._session.wait_for_task(instance.id, task) + finally: + if template_vm_ref: + self.virt._destroy(self.instance, template_vm_ref, shutdown=False, + destroy_kernel_ramdisk=False) + # TODO(mdietz): we could also consider renaming these to something # sensible so we don't need to blindly pass around dictionaries return {'base_copy': base_copy_uuid, 'cow': cow_uuid} @@ -332,9 +328,9 @@ class VMOps(object): new_cow_uuid = str(uuid.uuid4()) params = {'instance_id': instance.id, 'old_base_copy_uuid': disk_info['base_copy'], - 'old_cow_uuid': disk_info['cow'], + 'old_cow_uuid': disk_info['cow'], 'new_base_copy_uuid': new_base_copy_uuid, - 'new_cow_uuid': new_cow_uuid, + 'new_cow_uuid': new_cow_uuid, 'sr_path': VMHelper.get_sr_path(self._session), } task = self._session.async_call_plugin('migration', diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index d08754c19..aa12d432a 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -207,7 +207,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): 'transfer-encoding': 'chunked', 'x-image-meta-is_public': 'True', 'x-image-meta-status': 'queued', - 'x-image-meta-type': 'vhd', } + 'x-image-meta-type': 'vhd'} for header, value in headers.iteritems(): conn.putheader(header, value) conn.endheaders() -- cgit From f9cce74cad854d377de113a619dc42df10b9a2ba Mon Sep 17 00:00:00 2001 From: Masanori Itoh Date: Thu, 3 Mar 2011 14:10:42 +0900 Subject: Updated Authors and .mailmap --- .mailmap | 1 + Authors | 1 + 2 files changed, 2 insertions(+) diff --git a/.mailmap b/.mailmap index df012e066..ed4404ad5 100644 --- a/.mailmap +++ b/.mailmap @@ -15,6 +15,7 @@ + diff --git a/Authors b/Authors index b279d8a53..7993955e2 100644 --- a/Authors +++ b/Authors @@ -39,6 +39,7 @@ Ken Pepple Kevin L. 
Mitchell Koji Iida Lorin Hochstein +Masanori Itoh Matt Dietz Michael Gundlach Monsyne Dragon -- cgit From 26c217d1f16b100b9dc615388ee315e6daf336ce Mon Sep 17 00:00:00 2001 From: Masanori Itoh Date: Fri, 4 Mar 2011 00:30:19 +0900 Subject: Updated DescribeKeyPairs response tag checked in nova/tests/test_cloud.py --- nova/tests/test_cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 061910013..b195fa520 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -267,7 +267,7 @@ class CloudTestCase(test.TestCase): self._create_key('test1') self._create_key('test2') result = self.cloud.describe_key_pairs(self.context) - keys = result["keypairsSet"] + keys = result["keySet"] self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) -- cgit From a62e603e8b1cedd89ca0c71f1cdc928d19c68a4d Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 3 Mar 2011 11:04:33 -0500 Subject: adding wsgi.Request class to add custom best_match; adding new class to wsgify decorators; replacing all references to webob.Request in non-test code to wsgi.Request --- nova/api/direct.py | 4 ++-- nova/api/ec2/__init__.py | 14 +++++------ nova/api/ec2/metadatarequesthandler.py | 2 +- nova/api/openstack/__init__.py | 4 ++-- nova/api/openstack/auth.py | 4 ++-- nova/api/openstack/common.py | 2 +- nova/api/openstack/faults.py | 2 +- nova/api/openstack/ratelimiting/__init__.py | 4 ++-- nova/wsgi.py | 37 ++++++++++++++++++++++------- 9 files changed, 47 insertions(+), 26 deletions(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index 208b6d086..cd237f649 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -187,7 +187,7 @@ class ServiceWrapper(wsgi.Controller): def __init__(self, service_handle): self.service_handle = service_handle - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] @@ -218,7 +218,7 @@ class Proxy(object): self.prefix = prefix def __do_request(self, path, context, **kwargs): - req = webob.Request.blank(path) + req = wsgi.Request.blank(path) req.method = 'POST' req.body = urllib.urlencode({'json': utils.dumps(kwargs)}) req.environ['openstack.context'] = context diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 5adc2c075..58b1ecf03 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -53,7 +53,7 @@ flags.DEFINE_list('lockout_memcached_servers', None, class RequestLogging(wsgi.Middleware): """Access-Log akin logging for all EC2 API requests.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): start = utils.utcnow() rv = req.get_response(self.application) @@ -112,7 +112,7 @@ class Lockout(wsgi.Middleware): debug=0) super(Lockout, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): access_key = str(req.params['AWSAccessKeyId']) failures_key = "authfailures-%s" % access_key @@ -141,7 +141,7 @@ class Authenticate(wsgi.Middleware): """Authenticate an EC2 request and add 'ec2.context' to WSGI environ.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): # Read request signature and access id. 
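
The functools.wraps change is easiest to see in isolation. Below, the locking is stripped out so only the metadata behaviour remains; without the @functools.wraps line the assertions fail in exactly the way the new test_synchronized_wrapped_function_metadata test describes.

import functools

def synchronized(name):
    def wrap(f):
        @functools.wraps(f)   # copy __name__, __doc__, etc. onto inner
        def inner(*args, **kwargs):
            return f(*args, **kwargs)
        return inner
    return wrap

@synchronized('whatever')
def foo():
    """Bar"""

assert foo.__name__ == 'foo'   # would be 'inner' without wraps
assert foo.__doc__ == 'Bar'    # would be None without wraps
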
try: @@ -190,7 +190,7 @@ class Requestify(wsgi.Middleware): super(Requestify, self).__init__(app) self.controller = utils.import_class(controller)() - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Version', 'Timestamp'] @@ -269,7 +269,7 @@ class Authorizer(wsgi.Middleware): }, } - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): context = req.environ['ec2.context'] controller = req.environ['ec2.request'].controller.__class__.__name__ @@ -303,7 +303,7 @@ class Executor(wsgi.Application): response, or a 400 upon failure. """ - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): context = req.environ['ec2.context'] api_request = req.environ['ec2.request'] @@ -365,7 +365,7 @@ class Executor(wsgi.Application): class Versions(wsgi.Application): - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Respond to a request for all EC2 versions.""" # available api versions diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 6fb441656..28f99b0ef 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -65,7 +65,7 @@ class MetadataRequestHandler(wsgi.Application): data = data[item] return data - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): cc = cloud.CloudController() remote_address = req.remote_addr diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 274330e3b..b5439788d 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -47,7 +47,7 @@ flags.DEFINE_bool('allow_admin_api', class FaultWrapper(wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return req.get_response(self.application) @@ -115,7 +115,7 @@ class APIRouter(wsgi.Router): class Versions(wsgi.Application): - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Respond to a request for all OpenStack API versions.""" response = { diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6011e6115..de8905f46 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -46,7 +46,7 @@ class AuthMiddleware(wsgi.Middleware): self.auth = auth.manager.AuthManager() super(AuthMiddleware, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if not self.has_authentication(req): return self.authenticate(req) @@ -121,7 +121,7 @@ class AuthMiddleware(wsgi.Middleware): username - string key - string API key - req - webob.Request object + req - wsgi.Request object """ ctxt = context.get_admin_context() user = self.auth.get_user_from_access_key(key) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 9f85c5c8a..49b970d75 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -25,7 +25,7 @@ def limited(items, request, max_limit=1000): Return a slice of items according to requested offset and limit. @param items: A sliceable entity - @param request: `webob.Request` possibly containing 'offset' and 'limit' + @param request: `wsgi.Request` possibly containing 'offset' and 'limit' GET variables. 
'offset' is where to start in the list, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 224a7ef0b..c70b00fa3 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -42,7 +42,7 @@ class Fault(webob.exc.HTTPException): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index cbb4b897e..88ffc3246 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -57,7 +57,7 @@ class RateLimitingMiddleware(wsgi.Middleware): self.limiter = WSGIAppProxy(service_host) super(RateLimitingMiddleware, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Rate limit the request. @@ -183,7 +183,7 @@ class WSGIApp(object): """Create the WSGI application using the given Limiter instance.""" self.limiter = limiter - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): parts = req.path_info.split('/') # format: /limiter// diff --git a/nova/wsgi.py b/nova/wsgi.py index 1eb66d067..67216d540 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -82,6 +82,27 @@ class Server(object): log=WritableLogger(logger)) +class Request(webob.Request): + + def best_match(self): + """ + Determine the most acceptable content-type based on the + query extension then the Accept header + """ + + parts = self.path.rsplit(".", 1) + + if len(parts) > 1: + format = parts[1] + if format in ["json", "xml"]: + return "application/{0}".format(parts[1]) + + ctypes = ["application/json", "application/xml"] + bm = self.accept.best_match(ctypes) + + return bm or "application/json" + + class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @@ -113,7 +134,7 @@ class Application(object): def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: @@ -199,7 +220,7 @@ class Middleware(Application): """Do whatever you'd like to the response.""" return response - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): response = self.process_request(req) if response: @@ -212,7 +233,7 @@ class Debug(Middleware): """Helper class that can be inserted into any WSGI application chain to get information about the request and response.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): print ("*" * 40) + " REQUEST ENVIRON" for key, value in req.environ.items(): @@ -276,7 +297,7 @@ class Router(object): self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """ Route the incoming request to a controller based on self.map. 
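
The new best_match() prefers an explicit path extension, then falls back to the Accept header, then to JSON. Below is a condensed restatement of the same logic, trimmed for brevity, together with the two interesting cases; it relies on WebOb's accept.best_match(), the API of the WebOb releases this branch targets.

import webob

class Request(webob.Request):
    def best_match(self):
        # Extension wins, then Accept, then the JSON default.
        parts = self.path.rsplit('.', 1)
        if len(parts) > 1 and parts[1] in ('json', 'xml'):
            return 'application/' + parts[1]
        ctypes = ['application/json', 'application/xml']
        return self.accept.best_match(ctypes) or 'application/json'

req = Request.blank('/servers/123.xml')
req.headers['Accept'] = 'application/json'
assert req.best_match() == 'application/xml'    # extension beats Accept

req = Request.blank('/servers/123')
req.headers['Accept'] = 'application/json; q=0.3, application/xml; q=0.9'
assert req.best_match() == 'application/xml'    # highest q value wins
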
@@ -285,7 +306,7 @@ class Router(object): return self._router @staticmethod - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): """ Called by self._router after matching the incoming request to a route @@ -304,11 +325,11 @@ class Controller(object): WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument - which is the incoming webob.Request. They raise a webob.exc exception, + which is the incoming wsgi.Request. They raise a webob.exc exception, or return a dict which will be serialized by requested content type. """ - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """ Call the method specified in req.environ by RoutesMiddleware. @@ -358,7 +379,7 @@ class Serializer(object): needed to serialize a dictionary to that type. """ self.metadata = metadata or {} - req = webob.Request.blank('', environ) + req = wsgi.Request.blank('', environ) suffix = req.path_info.split('.')[-1].lower() if suffix == 'json': self.handler = self._to_json -- cgit From 6d075754bdd4090342bf4f79c726a52923c311a8 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 3 Mar 2011 12:45:34 -0500 Subject: adding wsgi.Controller and wsgi.Request testing; fixing format keyword argument exception --- nova/tests/api/test_wsgi.py | 120 +++++++++++++++++++++++++++++++++++++------- nova/wsgi.py | 4 +- 2 files changed, 105 insertions(+), 19 deletions(-) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 2c7852214..cf2d0e297 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -21,11 +21,13 @@ Test WSGI basics and provide some helper functions for other WSGI tests. 
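
Passing RequestClass into webob.dec.wsgify is what makes every wrapped handler receive the nova Request subclass instead of a plain webob.Request. A toy application showing the shape; the class and handler body are illustrative only.

import webob
import webob.dec

class Request(webob.Request):
    """Stand-in for nova.wsgi.Request."""

class EchoApp(object):
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        # req is our subclass, so custom helpers such as best_match()
        # are available to every handler wrapped this way.
        assert isinstance(req, Request)
        return webob.Response(text=u'handled %s' % req.path_info)

resp = Request.blank('/servers/1').get_response(EchoApp())
assert resp.status_int == 200
assert b'/servers/1' in resp.body
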
""" +import json from nova import test import routes import webob +from nova import exception from nova import wsgi @@ -66,30 +68,112 @@ class Test(test.TestCase): result = webob.Request.blank('/bad').get_response(Router()) self.assertNotEqual(result.body, "Router result") - def test_controller(self): - class Controller(wsgi.Controller): - """Test controller to call from router.""" - test = self +class ControllerTest(test.TestCase): + + class TestRouter(wsgi.Router): + + class TestController(wsgi.Controller): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "test": ["id"]}}} def show(self, req, id): # pylint: disable-msg=W0622,C0103 - """Default action called for requests with an ID.""" - self.test.assertEqual(req.path_info, '/tests/123') - self.test.assertEqual(id, '123') - return id + return {"test": {"id": id}} + + def __init__(self): + mapper = routes.Mapper() + mapper.resource("test", "tests", controller=self.TestController()) + wsgi.Router.__init__(self, mapper) + + def test_show(self): + request = wsgi.Request.blank('/tests/123') + result = request.get_response(self.TestRouter()) + self.assertEqual(json.loads(result.body), {"test": {"id": "123"}}) + + def test_content_type_from_accept_xml(self): + request = webob.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/xml") + + def test_content_type_from_accept_json(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/json") + + def test_content_type_from_query_extension_xml(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/xml") + + def test_content_type_from_query_extension_json(self): + request = wsgi.Request.blank('/tests/123.json') + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/json") + + def test_content_type_default_when_unsupported(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.status_int, 200) + self.assertEqual(result.headers["Content-Type"], "application/json") + + +class RequestTest(test.TestCase): + + def test_content_type_from_accept_xml(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.best_match() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml, application/json" + result = request.best_match() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = \ + "application/json; q=0.3, application/xml; q=0.9" + result = request.best_match() + self.assertEqual(result, "application/xml") + + def test_content_type_from_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.best_match() + self.assertEqual(result, "application/xml") + + request = 
wsgi.Request.blank('/tests/123.json') + result = request.best_match() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match() + self.assertEqual(result, "application/json") + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + request.headers["Accept"] = "application/json" + result = request.best_match() + self.assertEqual(result, "application/xml") + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match() + self.assertEqual(result, "application/json") - class Router(wsgi.Router): - """Test router.""" - def __init__(self): - mapper = routes.Mapper() - mapper.resource("test", "tests", controller=Controller()) - super(Router, self).__init__(mapper) - result = webob.Request.blank('/tests/123').get_response(Router()) - self.assertEqual(result.body, "123") - result = webob.Request.blank('/test/123').get_response(Router()) - self.assertNotEqual(result.body, "123") class SerializerTest(test.TestCase): diff --git a/nova/wsgi.py b/nova/wsgi.py index 67216d540..4577439cb 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -339,6 +339,8 @@ class Controller(object): method = getattr(self, action) del arg_dict['controller'] del arg_dict['action'] + if 'format' in arg_dict: + del arg_dict['format'] arg_dict['req'] = req result = method(**arg_dict) if type(result) is dict: @@ -379,7 +381,7 @@ class Serializer(object): needed to serialize a dictionary to that type. """ self.metadata = metadata or {} - req = wsgi.Request.blank('', environ) + req = Request.blank('', environ) suffix = req.path_info.split('.')[-1].lower() if suffix == 'json': self.handler = self._to_json -- cgit From c363c2aaacb01cbbe8dcdaa4bda2e5d2531ab8e8 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 3 Mar 2011 18:21:54 +0000 Subject: Use %s in case instance_id came through as a string --- nova/compute/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 35a7d7bc0..5f68fcc4d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -319,12 +319,12 @@ class API(base.Base): try: instance = self.get(context, instance_id) except exception.NotFound: - LOG.warning(_("Instance %d was not found during terminate"), + LOG.warning(_("Instance %s was not found during terminate"), instance_id) raise if (instance['state_description'] == 'terminating'): - LOG.warning(_("Instance %d is already being terminated"), + LOG.warning(_("Instance %s is already being terminated"), instance_id) return -- cgit From 0e1a458166ad1e89ca0755d88b8efec39855ee5c Mon Sep 17 00:00:00 2001 From: Cerberus Date: Thu, 3 Mar 2011 13:18:37 -0600 Subject: Renaming my migration yet again --- .../versions/007_add_instance_migrations.py | 61 ---------------------- .../versions/009_add_instance_migrations.py | 61 ++++++++++++++++++++++ 2 files changed, 61 insertions(+), 61 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/007_add_instance_migrations.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_instance_migrations.py deleted file mode 100644 index 4fda525f1..000000000 --- 
a/nova/db/sqlalchemy/migrate_repo/versions/007_add_instance_migrations.py +++ /dev/null @@ -1,61 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.from sqlalchemy import * - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Tables -# - -migrations = Table('migrations', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('source_compute', String(255)), - Column('dest_compute', String(255)), - Column('dest_host', String(255)), - Column('instance_id', Integer, ForeignKey('instances.id'), - nullable=True), - Column('status', String(255)), - ) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - for table in (migrations, ): - try: - table.create() - except Exception: - logging.info(repr(table)) - logging.exception('Exception while creating table') - raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py new file mode 100644 index 000000000..4fda525f1 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.from sqlalchemy import * + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
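
The '%s' change a little above is purely defensive: instance ids sometimes arrive as strings (for example straight from an API request), and '%d' refuses them outright. A short demonstration:

instance_id = '12'          # id delivered as a string, not an int
try:
    _ = "Instance %d was not found during terminate" % instance_id
except TypeError:
    pass                    # %d needs a number and raises instead of logging
print("Instance %s was not found during terminate" % instance_id)
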
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# + +migrations = Table('migrations', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('source_compute', String(255)), + Column('dest_compute', String(255)), + Column('dest_host', String(255)), + Column('instance_id', Integer, ForeignKey('instances.id'), + nullable=True), + Column('status', String(255)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (migrations, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise -- cgit From 848aced747a60c47d76efcb2147041339df4a628 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 3 Mar 2011 17:21:21 -0500 Subject: Refactor wsgi.Serializer away from handling Requests directly; now require Content-Type in all requests; fix tests according to new code --- nova/api/direct.py | 2 +- nova/api/openstack/__init__.py | 4 +- nova/api/openstack/consoles.py | 2 +- nova/api/openstack/faults.py | 5 +- nova/api/openstack/images.py | 2 +- nova/api/openstack/servers.py | 9 ++- nova/api/openstack/zones.py | 4 +- nova/exception.py | 4 ++ nova/tests/api/openstack/test_servers.py | 1 + nova/tests/api/openstack/test_zones.py | 15 +++-- nova/tests/api/test_wsgi.py | 95 +++++++++++++++++++------------ nova/tests/test_direct.py | 3 + nova/wsgi.py | 96 ++++++++++++++++++++------------ 13 files changed, 152 insertions(+), 90 deletions(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index cd237f649..1d699f947 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -206,7 +206,7 @@ class ServiceWrapper(wsgi.Controller): params = dict([(str(k), v) for (k, v) in params.iteritems()]) result = method(context, **params) if type(result) is dict or type(result) is list: - return self._serialize(result, req) + return self._serialize(result, req.best_match()) else: return result diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b5439788d..6e1a2a06c 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -124,4 +124,6 @@ class Versions(wsgi.Application): metadata = { "application/xml": { "attributes": dict(version=["status", "id"])}} - return wsgi.Serializer(req.environ, metadata).to_content_type(response) + + content_type = req.best_match() + return wsgi.Serializer(metadata).serialize(response, content_type) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 9ebdbe710..8c291c2eb 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -65,7 +65,7 @@ class Controller(wsgi.Controller): def create(self, req, server_id): """Creates a new console""" - #info = self._deserialize(req.body, req) + #info = self._deserialize(req.body, req.get_content_type()) self.console_api.create_console( req.environ['nova.context'], int(server_id)) diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index c70b00fa3..075fdb997 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -57,6 +57,7 @@ class Fault(webob.exc.HTTPException): 
fault_data[fault_name]['retryAfter'] = retry # 'code' is an attribute on the fault tag itself metadata = {'application/xml': {'attributes': {fault_name: 'code'}}} - serializer = wsgi.Serializer(req.environ, metadata) - self.wrapped_exc.body = serializer.to_content_type(fault_data) + serializer = wsgi.Serializer(metadata) + content_type = req.best_match() + self.wrapped_exc.body = serializer.serialize(fault_data, content_type) return self.wrapped_exc diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index cf85a496f..98f0dd96b 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -151,7 +151,7 @@ class Controller(wsgi.Controller): def create(self, req): context = req.environ['nova.context'] - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) instance_id = env["image"]["serverId"] name = env["image"]["name"] diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 08b95b46a..24d2826af 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -141,7 +141,7 @@ class Controller(wsgi.Controller): def create(self, req): """ Creates a new server for a given user """ - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) if not env: return faults.Fault(exc.HTTPUnprocessableEntity()) @@ -182,7 +182,10 @@ class Controller(wsgi.Controller): def update(self, req, id): """ Updates the server name or password """ - inst_dict = self._deserialize(req.body, req) + if len(req.body) == 0: + raise exc.HTTPUnprocessableEntity() + + inst_dict = self._deserialize(req.body, req.get_content_type()) if not inst_dict: return faults.Fault(exc.HTTPUnprocessableEntity()) @@ -205,7 +208,7 @@ class Controller(wsgi.Controller): def action(self, req, id): """ Multi-purpose method used to reboot, rebuild, and resize a server """ - input_dict = self._deserialize(req.body, req) + input_dict = self._deserialize(req.body, req.get_content_type()) #TODO(sandy): rebuild/resize not supported. 
try: reboot_type = input_dict['reboot']['type'] diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index d5206da20..cf6cd789f 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -67,13 +67,13 @@ class Controller(wsgi.Controller): def create(self, req): context = req.environ['nova.context'] - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) zone = db.zone_create(context, env["zone"]) return dict(zone=_scrub_zone(zone)) def update(self, req, id): context = req.environ['nova.context'] - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) zone_id = int(id) zone = db.zone_update(context, zone_id, env["zone"]) return dict(zone=_scrub_zone(zone)) diff --git a/nova/exception.py b/nova/exception.py index 7d65bd6a5..93c5fe3d7 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -88,6 +88,10 @@ class InvalidInputException(Error): pass +class InvalidContentType(Error): + pass + + class TimeoutException(Error): pass diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 78beb7df9..fae08d0be 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -227,6 +227,7 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.0/servers') req.method = 'POST' req.body = json.dumps(body) + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 555b206b9..d0da8eaaf 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -86,24 +86,27 @@ class ZonesTest(test.TestCase): def test_get_zone_list(self): req = webob.Request.blank('/v1.0/zones') + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) self.assertEqual(len(res_dict['zones']), 2) def test_get_zone_by_id(self): req = webob.Request.blank('/v1.0/zones/1') + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') self.assertFalse('password' in res_dict['zone']) - self.assertEqual(res.status_int, 200) def test_zone_delete(self): req = webob.Request.blank('/v1.0/zones/1') + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) @@ -112,13 +115,14 @@ class ZonesTest(test.TestCase): body = dict(zone=dict(api_url='http://blah.zoo', username='fred', password='fubar')) req = webob.Request.blank('/v1.0/zones') + req.headers["Content-Type"] = "application/json" req.method = 'POST' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo') self.assertFalse('username' in res_dict['zone']) @@ -126,13 +130,14 @@ class ZonesTest(test.TestCase): def test_zone_update(self): body = dict(zone=dict(username='zeb', password='sneaky')) req = webob.Request.blank('/v1.0/zones/1') + 
req.headers["Content-Type"] = "application/json" req.method = 'PUT' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') self.assertFalse('username' in res_dict['zone']) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index cf2d0e297..7c0135656 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -93,29 +93,29 @@ class ControllerTest(test.TestCase): result = request.get_response(self.TestRouter()) self.assertEqual(json.loads(result.body), {"test": {"id": "123"}}) - def test_content_type_from_accept_xml(self): + def test_response_content_type_from_accept_xml(self): request = webob.Request.blank('/tests/123') request.headers["Accept"] = "application/xml" result = request.get_response(self.TestRouter()) self.assertEqual(result.headers["Content-Type"], "application/xml") - def test_content_type_from_accept_json(self): + def test_response_content_type_from_accept_json(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/json" result = request.get_response(self.TestRouter()) self.assertEqual(result.headers["Content-Type"], "application/json") - def test_content_type_from_query_extension_xml(self): + def test_response_content_type_from_query_extension_xml(self): request = wsgi.Request.blank('/tests/123.xml') result = request.get_response(self.TestRouter()) self.assertEqual(result.headers["Content-Type"], "application/xml") - def test_content_type_from_query_extension_json(self): + def test_response_content_type_from_query_extension_json(self): request = wsgi.Request.blank('/tests/123.json') result = request.get_response(self.TestRouter()) self.assertEqual(result.headers["Content-Type"], "application/json") - def test_content_type_default_when_unsupported(self): + def test_response_content_type_default_when_unsupported(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.get_response(self.TestRouter()) @@ -125,6 +125,17 @@ class ControllerTest(test.TestCase): class RequestTest(test.TestCase): + def test_request_content_type_missing(self): + request = wsgi.Request.blank('/tests/123') + request.body = "" + self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) + + def test_request_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "text/html" + request.body = "asdf
" + self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) + def test_content_type_from_accept_xml(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml" @@ -173,40 +184,48 @@ class RequestTest(test.TestCase): self.assertEqual(result, "application/json") - - - class SerializerTest(test.TestCase): - def match(self, url, accept, expect): + def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = '(2,3)' + serializer = wsgi.Serializer() + result = serializer.serialize(input_dict, "application/xml") + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_xml) + + def test_json(self): + input_dict = dict(servers=dict(a=(2, 3))) expected_json = '{"servers":{"a":[2,3]}}' - req = webob.Request.blank(url, headers=dict(Accept=accept)) - result = wsgi.Serializer(req.environ).to_content_type(input_dict) + serializer = wsgi.Serializer() + result = serializer.serialize(input_dict, "application/json") result = result.replace('\n', '').replace(' ', '') - if expect == 'xml': - self.assertEqual(result, expected_xml) - elif expect == 'json': - self.assertEqual(result, expected_json) - else: - raise "Bad expect value" - - def test_basic(self): - self.match('/servers/4.json', None, expect='json') - self.match('/servers/4', 'application/json', expect='json') - self.match('/servers/4', 'application/xml', expect='xml') - self.match('/servers/4.xml', None, expect='xml') - - def test_defaults_to_json(self): - self.match('/servers/4', None, expect='json') - self.match('/servers/4', 'text/html', expect='json') - - def test_suffix_takes_precedence_over_accept_header(self): - self.match('/servers/4.xml', 'application/json', expect='xml') - self.match('/servers/4.xml.', 'application/json', expect='json') - - def test_deserialize(self): + self.assertEqual(result, expected_json) + + def test_unsupported_content_type(self): + serializer = wsgi.Serializer() + self.assertRaises(exception.InvalidContentType, serializer.serialize, + {}, "text/null") + + def test_deserialize_json(self): + data = """{"a": { + "a1": "1", + "a2": "2", + "bs": ["1", "2", "3", {"c": {"c1": "1"}}], + "d": {"e": "1"}, + "f": "1"}}""" + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + metadata = {} + serializer = wsgi.Serializer(metadata) + self.assertEqual(serializer.deserialize(data, "application/json"), + as_dict) + + def test_deserialize_xml(self): xml = """ 123 @@ -221,11 +240,13 @@ class SerializerTest(test.TestCase): 'd': {'e': '1'}, 'f': '1'}) metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})} - serializer = wsgi.Serializer({}, metadata) - self.assertEqual(serializer.deserialize(xml), as_dict) + serializer = wsgi.Serializer(metadata) + self.assertEqual(serializer.deserialize(xml, "application/xml"), + as_dict) def test_deserialize_empty_xml(self): xml = """""" as_dict = {"a": {}} - serializer = wsgi.Serializer({}) - self.assertEqual(serializer.deserialize(xml), as_dict) + serializer = wsgi.Serializer() + self.assertEqual(serializer.deserialize(xml, "application/xml"), + as_dict) diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index b6bfab534..85bfcfd85 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -59,6 +59,7 @@ class DirectTestCase(test.TestCase): req.headers['X-OpenStack-User'] = 'user1' req.headers['X-OpenStack-Project'] = 'proj1' resp = req.get_response(self.auth_router) + 
self.assertEqual(resp.status_int, 200) data = json.loads(resp.body) self.assertEqual(data['user'], 'user1') self.assertEqual(data['project'], 'proj1') @@ -69,6 +70,7 @@ class DirectTestCase(test.TestCase): req.method = 'POST' req.body = 'json=%s' % json.dumps({'data': 'foo'}) resp = req.get_response(self.router) + self.assertEqual(resp.status_int, 200) resp_parsed = json.loads(resp.body) self.assertEqual(resp_parsed['data'], 'foo') @@ -78,6 +80,7 @@ class DirectTestCase(test.TestCase): req.method = 'POST' req.body = 'data=foo' resp = req.get_response(self.router) + self.assertEqual(resp.status_int, 200) resp_parsed = json.loads(resp.body) self.assertEqual(resp_parsed['data'], 'foo') diff --git a/nova/wsgi.py b/nova/wsgi.py index 4577439cb..c3e08522d 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -36,6 +36,7 @@ import webob.exc from paste import deploy +from nova import exception from nova import flags from nova import log as logging from nova import utils @@ -102,6 +103,14 @@ class Request(webob.Request): return bm or "application/json" + def get_content_type(self): + try: + ct = self.headers["Content-Type"] + assert ct in ("application/xml", "application/json") + return ct + except Exception: + raise webob.exc.HTTPBadRequest("Invalid content type") + class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @@ -343,30 +352,41 @@ class Controller(object): del arg_dict['format'] arg_dict['req'] = req result = method(**arg_dict) + if type(result) is dict: - return self._serialize(result, req) + content_type = req.best_match() + body = self._serialize(result, content_type) + + response = webob.Response() + response.headers["Content-Type"] = content_type + response.body = body + return response + else: return result - def _serialize(self, data, request): + def _serialize(self, data, content_type): """ - Serialize the given dict to the response type requested in request. + Serialize the given dict to the provided content_type. Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = Serializer(request.environ, _metadata) - return serializer.to_content_type(data) + serializer = Serializer(_metadata) + try: + return serializer.serialize(data, content_type) + except exception.InvalidContentType: + raise webob.exc.HTTPNotAcceptable() - def _deserialize(self, data, request): + def _deserialize(self, data, content_type): """ - Deserialize the request body to the response type requested in request. + Deserialize the request body to the specefied content type. Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = Serializer(request.environ, _metadata) - return serializer.deserialize(data) + serializer = Serializer(_metadata) + return serializer.deserialize(data, content_type) class Serializer(object): @@ -374,50 +394,52 @@ class Serializer(object): Serializes and deserializes dictionaries to certain MIME types. """ - def __init__(self, environ, metadata=None): + def __init__(self, metadata=None): """ Create a serializer based on the given WSGI environment. 'metadata' is an optional dict mapping MIME types to information needed to serialize a dictionary to that type. 
""" self.metadata = metadata or {} - req = Request.blank('', environ) - suffix = req.path_info.split('.')[-1].lower() - if suffix == 'json': - self.handler = self._to_json - elif suffix == 'xml': - self.handler = self._to_xml - elif 'application/json' in req.accept: - self.handler = self._to_json - elif 'application/xml' in req.accept: - self.handler = self._to_xml - else: - # This is the default - self.handler = self._to_json - def to_content_type(self, data): - """ - Serialize a dictionary into a string. + def _get_serialize_handler(self, content_type): + handlers = { + "application/json": self._to_json, + "application/xml": self._to_xml, + } + + try: + return handlers[content_type] + except Exception: + raise exception.InvalidContentType() - The format of the string will be decided based on the Content Type - requested in self.environ: by Accept: header, or by URL suffix. + def serialize(self, data, content_type): + """ + Serialize a dictionary into a string of the specified content type. """ - return self.handler(data) + return self._get_serialize_handler(content_type)(data) - def deserialize(self, datastring): + def deserialize(self, datastring, content_type): """ Deserialize a string to a dictionary. The string must be in the format of a supported MIME type. """ - datastring = datastring.strip() + return self.get_deserialize_handler(content_type)(datastring) + + def get_deserialize_handler(self, content_type): + handlers = { + "application/json": self._from_json, + "application/xml": self._from_xml, + } + try: - is_xml = (datastring[0] == '<') - if not is_xml: - return utils.loads(datastring) - return self._from_xml(datastring) - except: - return None + return handlers[content_type] + except Exception: + raise exception.InvalidContentType() + + def _from_json(self, datastring): + return utils.loads(datastring) def _from_xml(self, datastring): xmldata = self.metadata.get('application/xml', {}) -- cgit From 1d8914fc752f7182f942cdd40f2ba18baedeed0c Mon Sep 17 00:00:00 2001 From: Cerberus Date: Fri, 4 Mar 2011 11:19:35 -0600 Subject: More fixes --- nova/api/openstack/servers.py | 4 ++-- nova/compute/api.py | 8 ++++---- nova/compute/manager.py | 2 +- nova/tests/xenapi/stubs.py | 2 +- nova/virt/xenapi/vm_utils.py | 16 ++++++++-------- nova/virt/xenapi/vmops.py | 11 ++++++----- 6 files changed, 22 insertions(+), 21 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ceb17c9e4..c2bf42b72 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -203,8 +203,8 @@ class Controller(wsgi.Controller): return exc.HTTPNoContent() def action(self, req, id): - """ Multi-purpose method used to reboot, rebuild, or - resize a server """ + """Multi-purpose method used to reboot, rebuild, or + resize a server""" actions = { 'reboot': self._action_reboot, diff --git a/nova/compute/api.py b/nova/compute/api.py index bfa5c7dba..ce9da727d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -430,8 +430,8 @@ class API(base.Base): migration_ref = self.db.migration_get_by_instance_and_status(context, instance_id, 'finished') if not migration_ref: - raise exception.NotFound(_("No finished migrations found for \ - instance")) + raise exception.NotFound(_("No finished migrations found for " + "instance")) params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance_id, @@ -444,8 +444,8 @@ class API(base.Base): migration_ref = self.db.migration_get_by_instance_and_status(context, instance_id, 
'finished') if not migration_ref: - raise exception.NotFound(_("No finished migrations found for \ - instance")) + raise exception.NotFound(_("No finished migrations found for " + "instance")) instance_ref = self.db.instance_get(context, instance_id) params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance_id, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1c42b383c..b3e864154 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -414,7 +414,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception @checks_instance_lock def confirm_resize(self, context, instance_id, migration_id): - """ Destroys the source instance """ + """Destroys the source instance""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index d17951b81..caefcff34 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -221,7 +221,7 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): class FakeSessionForMigrationTests(fake.SessionBase): - """ Stubs out a XenAPISession for Migration tests """ + """Stubs out a XenAPISession for Migration tests""" def __init__(self, uri): super(FakeSessionForMigrationTests, self).__init__(uri) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index ca2d634f1..eff207a51 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -270,8 +270,8 @@ class VMHelper(HelperBase): @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): - """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, - Snapshot VHD """ + """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, + Snapshot VHD""" #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...") @@ -284,7 +284,7 @@ class VMHelper(HelperBase): original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref) task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) - template_vm_ref = session.wait_for_task(instance_id, task) + template_vm_ref = session.wait_for_task(task, instance_id) template_vdi_rec = cls.get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] @@ -302,14 +302,14 @@ class VMHelper(HelperBase): @classmethod def get_sr(cls, session, sr_label='slices'): - """ Finds the SR named by the given name label and returns - the UUID """ + """Finds the SR named by the given name label and returns + the UUID""" return session.call_xenapi('SR.get_by_name_label', sr_label)[0] @classmethod def get_sr_path(cls, session, sr_label='slices'): - """ Finds the SR and then coerces it into a path on the dom0 file - system """ + """Finds the SR and then coerces it into a path on the dom0 file + system""" return FLAGS.xenapi_sr_base_path + cls.get_sr(session, sr_label) @classmethod @@ -643,7 +643,7 @@ class VMHelper(HelperBase): if sr_ref: LOG.debug(_("Re-scanning SR %s"), sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) - session.wait_for_task(instance_id, task) + session.wait_for_task(task, instance_id) @classmethod def scan_default_sr(cls, session): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 60ce51f4a..01bfa2dc5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -233,7 +233,7 @@ class 
VMOps(object): "start") def snapshot(self, instance, image_id): - """ Create snapshot from a running VM instance + """Create snapshot from a running VM instance :param instance: instance to be snapshotted :param image_id: id of image to upload to @@ -285,7 +285,7 @@ class VMOps(object): return def migrate_disk_and_power_off(self, instance, dest): - """ Copies a VHD from one host machine to another + """Copies a VHD from one host machine to another :param instance: the instance that owns the VHD in question :param dest: the destination host machine @@ -314,7 +314,7 @@ class VMOps(object): task = self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) - self._session.wait_for_task(instance.id, task) + self._session.wait_for_task(task, instance.id) # Now power down the instance and transfer the COW VHD self._shutdown(instance, vm_ref, method='clean') @@ -326,7 +326,7 @@ class VMOps(object): task = self._session.async_call_plugin('migration', 'transfer_vhd', {'params': pickle.dumps(params)}) - self._session.wait_for_task(instance.id, task) + self._session.wait_for_task(task, instance.id) finally: if template_vm_ref: @@ -338,6 +338,7 @@ class VMOps(object): return {'base_copy': base_copy_uuid, 'cow': cow_uuid} def attach_disk(self, instance, disk_info): + """Links the base copy VHD to the COW via the XAPI plugin""" vm_ref = VMHelper.lookup(self._session, instance.name) new_base_copy_uuid = str(uuid.uuid4()) new_cow_uuid = str(uuid.uuid4()) @@ -350,7 +351,7 @@ class VMOps(object): task = self._session.async_call_plugin('migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)}) - self._session.wait_for_task(instance.id, task) + self._session.wait_for_task(task, instance.id) # Now we rescan the SR so we find the VHDs VMHelper.scan_default_sr(self._session) -- cgit From 831f398653cc99253bfeeb232165d3f9c043bd0b Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Fri, 4 Mar 2011 14:01:25 -0500 Subject: Fix renaming of instance fields using update_instance method. --- nova/api/ec2/apirequest.py | 18 +++++++++++++++++- nova/api/ec2/cloud.py | 4 ++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 2b1acba5a..d7ad08d2f 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -52,7 +52,23 @@ def _database_to_isoformat(datetimeobj): def _try_convert(value): - """Return a non-string if possible""" + """Return a non-string from a string or unicode, if possible. 
+ + ============= ===================================================== + When value is returns + ============= ===================================================== + zero-length '' + 'None' None + 'True' True + 'False' False + '0', '-0' 0 + 0xN, -0xN int from hex (postitive) (N is any number) + 0bN, -0bN int from binary (positive) (N is any number) + * try conversion to int, float, complex, fallback value + + """ + if len(value) == 0: + return '' if value == 'None': return None if value == 'True': diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c6309f03c..0d22a3f46 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -838,14 +838,14 @@ class CloudController(object): self.compute_api.unrescue(context, instance_id=instance_id) return True - def update_instance(self, context, ec2_id, **kwargs): + def update_instance(self, context, instance_id, **kwargs): updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: if field in kwargs: changes[field] = kwargs[field] if changes: - instance_id = ec2_id_to_id(ec2_id) + instance_id = ec2_id_to_id(instance_id) self.compute_api.update(context, instance_id=instance_id, **kwargs) return True -- cgit From 1eed366b7508c0f225b2c9691e1f62a6f88ee3f8 Mon Sep 17 00:00:00 2001 From: Ricardo Carrillo Cruz Date: Fri, 4 Mar 2011 21:07:03 +0100 Subject: Added initial support to delete networks nova-manage --- bin/nova-manage | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/bin/nova-manage b/bin/nova-manage index 9bf3a1bb3..9557f2423 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -546,6 +546,16 @@ class NetworkCommands(object): network.dns) + def delete(self, fixed_range): + """Deletes a network""" + try: + network = [n for n in db.network_get_all(context.get_admin_context()) + if n.cidr == fixed_range][0] + + print network.id, network.cidr, network.project_id + except IndexError: + raise ValueError(_("Network does not exist")) + class ServiceCommands(object): """Enable and disable running services""" -- cgit From b3d3366b8fd4eaf81bb9e03ad808c1a139e5b5b0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 7 Mar 2011 12:07:23 -0500 Subject: Generate 'adminPass' and call set_password when creating servers. 
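The diff that follows derives the initial adminPass from the first four characters of the server's display name plus a twelve-character random suffix (sixteen characters total, which is what the updated test asserts). A minimal sketch of the same scheme, using only the standard library; sketch_admin_password is a hypothetical helper, the real code calls Nova's utils.generate_password:

    import random
    import string

    def sketch_admin_password(server_name, suffix_length=12):
        # Illustrative only: first four characters of the server name plus
        # a random alphanumeric suffix.  A production version should draw
        # the suffix from a cryptographic source.
        alphabet = string.ascii_letters + string.digits
        suffix = ''.join(random.choice(alphabet) for _ in range(suffix_length))
        return server_name[:4] + suffix

    # sketch_admin_password('server_test') -> e.g. 'servQ3xkT9ZpLw1a' (16 chars)
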
--- nova/api/openstack/servers.py | 10 +++++++--- nova/tests/api/openstack/test_servers.py | 8 +++++++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 08b95b46a..6cd8bb451 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -84,13 +84,11 @@ def _translate_detail_keys(inst): return dict(server=inst_dict) - def _translate_keys(inst): """ Coerces into dictionary format, excluding all model attributes save for id and name """ return dict(server=dict(id=inst['id'], name=inst['display_name'])) - class Controller(wsgi.Controller): """ The Server API controller for the OpenStack API """ @@ -178,7 +176,13 @@ class Controller(wsgi.Controller): key_data=key_pair['public_key'], metadata=metadata, onset_files=env.get('onset_files', [])) - return _translate_keys(instances[0]) + + server = _translate_keys(instances[0]) + password = "%s%s" % (server['server']['name'][:4], + utils.generate_password(12)) + server['server']['adminPass'] = password + self.compute_api.set_admin_password(context, server['server']['id']) + return server def update(self, req, id): """ Updates the server name or password """ diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 78beb7df9..16b48a13b 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -186,7 +186,7 @@ class ServersTest(test.TestCase): def test_create_instance(self): def instance_create(context, inst): - return {'id': '1', 'display_name': ''} + return {'id': '1', 'display_name': 'server_test'} def server_update(context, id, params): return instance_create(context, id) @@ -230,6 +230,12 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) + server = json.loads(res.body)['server'] + self.assertEqual('serv', server['adminPass'][:4]) + self.assertEqual(16, len(server['adminPass'])) + self.assertEqual('server_test', server['name']) + self.assertEqual('1', server['id']) + self.assertEqual(res.status_int, 200) def test_update_no_body(self): -- cgit From a775c4eee279e11268a6cc447aee24c452e4665a Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Mon, 7 Mar 2011 17:17:41 +0000 Subject: Merge prop changes and test fixes --- nova/tests/xenapi/stubs.py | 26 +++++++++++++------------- nova/virt/xenapi/vm_utils.py | 30 ++++++++++++------------------ nova/virt/xenapi/vmops.py | 4 ++-- 3 files changed, 27 insertions(+), 33 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index caefcff34..11e89c9b4 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -227,18 +227,8 @@ class FakeSessionForMigrationTests(fake.SessionBase): def stub_out_migration_methods(stubs): - class FakeSnapshot(object): - def __getattr__(self, key): - return str(key) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - pass - def fake_get_snapshot(self, instance): - return FakeSnapshot() + return 'foo', 'bar' @classmethod def fake_get_vdi(cls, session, vm_ref): @@ -251,11 +241,21 @@ def stub_out_migration_methods(stubs): pass @classmethod - def fake_scan_sr(cls, session): + def fake_sr(cls, session, *args): + pass + + @classmethod + def fake_get_sr_path(cls, *args): + return "fake" + + def fake_destroy(*args, **kwargs): pass - stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_scan_sr) + stubs.Set(vmops.VMOps, '_destroy', fake_destroy) + stubs.Set(vm_utils.VMHelper, 
'scan_default_sr', fake_sr) + stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr) stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot) stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi) stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None) + stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path) stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index eff207a51..80b7540d4 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -307,10 +307,16 @@ class VMHelper(HelperBase): return session.call_xenapi('SR.get_by_name_label', sr_label)[0] @classmethod - def get_sr_path(cls, session, sr_label='slices'): - """Finds the SR and then coerces it into a path on the dom0 file - system""" - return FLAGS.xenapi_sr_base_path + cls.get_sr(session, sr_label) + def get_sr_path(cls, session): + """Return the path to our storage repository + + This is used when we're dealing with VHDs directly, either by taking + snapshots or by restoring an image in the DISK_VHD format. + """ + sr_ref = safe_find_sr(session) + sr_rec = session.get_xenapi().SR.get_record(sr_ref) + sr_uuid = sr_rec["uuid"] + return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_id): @@ -326,7 +332,7 @@ class VMHelper(HelperBase): 'image_id': image_id, 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port, - 'sr_path': get_sr_path(session)} + 'sr_path': cls.get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'upload_vhd', kwargs) @@ -369,7 +375,7 @@ class VMHelper(HelperBase): 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port, 'uuid_stack': uuid_stack, - 'sr_path': get_sr_path(session)} + 'sr_path': cls.get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) @@ -775,18 +781,6 @@ def find_sr(session): return None -def get_sr_path(session): - """Return the path to our storage repository - - This is used when we're dealing with VHDs directly, either by taking - snapshots or by restoring an image in the DISK_VHD format. - """ - sr_ref = safe_find_sr(session) - sr_rec = session.get_xenapi().SR.get_record(sr_ref) - sr_uuid = sr_rec["uuid"] - return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) - - def remap_vbd_dev(dev): """Return the appropriate location for a plugged-in VBD device diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 01bfa2dc5..b862c9de9 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -262,7 +262,7 @@ class VMOps(object): self._session, instance.id, template_vdi_uuids, image_id) finally: if template_vm_ref: - self.virt._destroy(self.instance, template_vm_ref, + self._destroy(instance, template_vm_ref, shutdown=False, destroy_kernel_ramdisk=False) logging.debug(_("Finished snapshot and upload for VM %s"), instance) @@ -330,7 +330,7 @@ class VMOps(object): finally: if template_vm_ref: - self.virt._destroy(self.instance, template_vm_ref, + self._destroy(instance, template_vm_ref, shutdown=False, destroy_kernel_ramdisk=False) # TODO(mdietz): we could also consider renaming these to something -- cgit From f72366f007239656d3d5e3fc80cd277758eedf9b Mon Sep 17 00:00:00 2001 From: "Kevin L. 
Mitchell" Date: Mon, 7 Mar 2011 19:33:24 +0000 Subject: Create --paste_config flag defaulting to api-paste.ini and mv etc/nova-api.conf to match --- bin/nova-api | 7 +++-- etc/api-paste.ini | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ etc/nova-api.conf | 91 ------------------------------------------------------- 3 files changed, 96 insertions(+), 93 deletions(-) create mode 100644 etc/api-paste.ini delete mode 100644 etc/nova-api.conf diff --git a/bin/nova-api b/bin/nova-api index 14be4b841..0b2a44c88 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -43,6 +43,8 @@ from nova import wsgi LOG = logging.getLogger('nova.api') FLAGS = flags.FLAGS +flags.DEFINE_string('paste_config', "api-paste.ini", + 'File name for the paste.deploy config for nova-api') flags.DEFINE_string('ec2_listen', "0.0.0.0", 'IP address for EC2 API to listen') flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') @@ -90,8 +92,9 @@ if __name__ == '__main__': for flag in FLAGS: flag_get = FLAGS.get(flag, None) LOG.debug("%(flag)s : %(flag_get)s" % locals()) - conf = wsgi.paste_config_file('nova-api.conf') + conf = wsgi.paste_config_file(FLAGS.paste_config) if conf: run_app(conf) else: - LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf') + LOG.error(_("No paste configuration found for: %s"), + FLAGS.paste_config) diff --git a/etc/api-paste.ini b/etc/api-paste.ini new file mode 100644 index 000000000..9f7e93d4c --- /dev/null +++ b/etc/api-paste.ini @@ -0,0 +1,91 @@ +####### +# EC2 # +####### + +[composite:ec2] +use = egg:Paste#urlmap +/: ec2versions +/services/Cloud: ec2cloud +/services/Admin: ec2admin +/latest: ec2metadata +/2007-01-19: ec2metadata +/2007-03-01: ec2metadata +/2007-08-29: ec2metadata +/2007-10-10: ec2metadata +/2007-12-15: ec2metadata +/2008-02-01: ec2metadata +/2008-09-01: ec2metadata +/2009-04-04: ec2metadata +/1.0: ec2metadata + +[pipeline:ec2cloud] +pipeline = logrequest authenticate cloudrequest authorizer ec2executor +#pipeline = logrequest ec2lockout authenticate cloudrequest authorizer ec2executor + +[pipeline:ec2admin] +pipeline = logrequest authenticate adminrequest authorizer ec2executor + +[pipeline:ec2metadata] +pipeline = logrequest ec2md + +[pipeline:ec2versions] +pipeline = logrequest ec2ver + +[filter:logrequest] +paste.filter_factory = nova.api.ec2:RequestLogging.factory + +[filter:ec2lockout] +paste.filter_factory = nova.api.ec2:Lockout.factory + +[filter:authenticate] +paste.filter_factory = nova.api.ec2:Authenticate.factory + +[filter:cloudrequest] +controller = nova.api.ec2.cloud.CloudController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:adminrequest] +controller = nova.api.ec2.admin.AdminController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:authorizer] +paste.filter_factory = nova.api.ec2:Authorizer.factory + +[app:ec2executor] +paste.app_factory = nova.api.ec2:Executor.factory + +[app:ec2ver] +paste.app_factory = nova.api.ec2:Versions.factory + +[app:ec2md] +paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory + +############# +# Openstack # +############# + +[composite:osapi] +use = egg:Paste#urlmap +/: osversions +/v1.0: openstackapi + +[pipeline:openstackapi] +pipeline = faultwrap auth ratelimit osapiapp + +[filter:faultwrap] +paste.filter_factory = nova.api.openstack:FaultWrapper.factory + +[filter:auth] +paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory + +[filter:ratelimit] +paste.filter_factory = 
nova.api.openstack.ratelimiting:RateLimitingMiddleware.factory + +[app:osapiapp] +paste.app_factory = nova.api.openstack:APIRouter.factory + +[pipeline:osversions] +pipeline = faultwrap osversionapp + +[app:osversionapp] +paste.app_factory = nova.api.openstack:Versions.factory diff --git a/etc/nova-api.conf b/etc/nova-api.conf deleted file mode 100644 index 9f7e93d4c..000000000 --- a/etc/nova-api.conf +++ /dev/null @@ -1,91 +0,0 @@ -####### -# EC2 # -####### - -[composite:ec2] -use = egg:Paste#urlmap -/: ec2versions -/services/Cloud: ec2cloud -/services/Admin: ec2admin -/latest: ec2metadata -/2007-01-19: ec2metadata -/2007-03-01: ec2metadata -/2007-08-29: ec2metadata -/2007-10-10: ec2metadata -/2007-12-15: ec2metadata -/2008-02-01: ec2metadata -/2008-09-01: ec2metadata -/2009-04-04: ec2metadata -/1.0: ec2metadata - -[pipeline:ec2cloud] -pipeline = logrequest authenticate cloudrequest authorizer ec2executor -#pipeline = logrequest ec2lockout authenticate cloudrequest authorizer ec2executor - -[pipeline:ec2admin] -pipeline = logrequest authenticate adminrequest authorizer ec2executor - -[pipeline:ec2metadata] -pipeline = logrequest ec2md - -[pipeline:ec2versions] -pipeline = logrequest ec2ver - -[filter:logrequest] -paste.filter_factory = nova.api.ec2:RequestLogging.factory - -[filter:ec2lockout] -paste.filter_factory = nova.api.ec2:Lockout.factory - -[filter:authenticate] -paste.filter_factory = nova.api.ec2:Authenticate.factory - -[filter:cloudrequest] -controller = nova.api.ec2.cloud.CloudController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:adminrequest] -controller = nova.api.ec2.admin.AdminController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:authorizer] -paste.filter_factory = nova.api.ec2:Authorizer.factory - -[app:ec2executor] -paste.app_factory = nova.api.ec2:Executor.factory - -[app:ec2ver] -paste.app_factory = nova.api.ec2:Versions.factory - -[app:ec2md] -paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory - -############# -# Openstack # -############# - -[composite:osapi] -use = egg:Paste#urlmap -/: osversions -/v1.0: openstackapi - -[pipeline:openstackapi] -pipeline = faultwrap auth ratelimit osapiapp - -[filter:faultwrap] -paste.filter_factory = nova.api.openstack:FaultWrapper.factory - -[filter:auth] -paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory - -[filter:ratelimit] -paste.filter_factory = nova.api.openstack.ratelimiting:RateLimitingMiddleware.factory - -[app:osapiapp] -paste.app_factory = nova.api.openstack:APIRouter.factory - -[pipeline:osversions] -pipeline = faultwrap osversionapp - -[app:osversionapp] -paste.app_factory = nova.api.openstack:Versions.factory -- cgit From bcb18ee3d0d095b616c0909c92a151a599d4e17f Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Mon, 7 Mar 2011 15:05:07 -0500 Subject: Invalid values for offset and limit params in http requests now return a 400 response with a useful message in the body. Also added and updated tests. 
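The diff below tightens the pagination helper in nova/api/openstack/common.py so that malformed or negative offset/limit query parameters raise HTTPBadRequest with a descriptive message instead of being silently replaced with defaults. A framework-free sketch of the same validation logic; limited_sketch is illustrative, with a plain dict of query parameters and ValueError standing in for the webob request and fault classes:

    def limited_sketch(items, params, max_limit=1000):
        """Slice 'items' by 'offset'/'limit' params, rejecting bad values.

        Simplified stand-in for nova.api.openstack.common.limited().
        """
        try:
            offset = int(params.get('offset', 0))
        except ValueError:
            raise ValueError('offset param must be an integer')
        try:
            limit = int(params.get('limit', max_limit))
        except ValueError:
            raise ValueError('limit param must be an integer')
        if offset < 0 or limit < 0:
            raise ValueError('offset and limit params must be positive')
        # A limit of 0 means "no explicit limit"; never exceed max_limit.
        limit = min(max_limit, limit or max_limit)
        return items[offset:offset + limit]

    # limited_sketch(list(range(10)), {'limit': '2', 'offset': '1'}) -> [1, 2]
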
--- nova/api/openstack/common.py | 11 +++++++---- nova/tests/api/openstack/test_common.py | 20 ++++---------------- nova/tests/api/openstack/test_servers.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 20 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 9f85c5c8a..f7a9cc3f0 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -36,15 +36,18 @@ def limited(items, request, max_limit=1000): try: offset = int(request.GET.get('offset', 0)) except ValueError: - offset = 0 + raise webob.exc.HTTPBadRequest(_('offset param must be an integer')) try: limit = int(request.GET.get('limit', max_limit)) except ValueError: - limit = max_limit + raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) - if offset < 0 or limit < 0: - raise webob.exc.HTTPBadRequest() + if limit < 0: + raise webob.exc.HTTPBadRequest(_('limit param must be positive')) + + if offset < 0: + raise webob.exc.HTTPBadRequest(_('offset param must be positive')) limit = min(max_limit, limit or max_limit) range_end = offset + limit diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 92023362c..8f57c5b67 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -79,20 +79,14 @@ class LimiterTest(test.TestCase): Test offset key works with a blank offset. """ req = Request.blank('/?offset=') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium) - self.assertEqual(limited(self.large, req), self.large[:1000]) + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) def test_limiter_offset_bad(self): """ Test offset key works with a BAD offset. """ req = Request.blank(u'/?offset=\u0020aa') - self.assertEqual(limited(self.tiny, req), self.tiny) - self.assertEqual(limited(self.small, req), self.small) - self.assertEqual(limited(self.medium, req), self.medium) - self.assertEqual(limited(self.large, req), self.large[:1000]) + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) def test_limiter_nothing(self): """ @@ -166,18 +160,12 @@ class LimiterTest(test.TestCase): """ Test a negative limit. """ - def _limit_large(): - limited(self.large, req, max_limit=2000) - req = Request.blank('/?limit=-3000') - self.assertRaises(webob.exc.HTTPBadRequest, _limit_large) + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) def test_limiter_negative_offset(self): """ Test a negative offset. 
""" - def _limit_large(): - limited(self.large, req, max_limit=2000) - req = Request.blank('/?offset=-30') - self.assertRaises(webob.exc.HTTPBadRequest, _limit_large) + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 78beb7df9..10fb2bafb 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -184,6 +184,34 @@ class ServersTest(test.TestCase): self.assertEqual(s.get('imageId', None), None) i += 1 + def test_get_servers_with_limit(self): + req = webob.Request.blank('/v1.0/servers?limit=3') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [0, 1, 2]) + + req = webob.Request.blank('/v1.0/servers?limit=aaa') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue('limit' in res.body) + + def test_get_servers_with_offset(self): + req = webob.Request.blank('/v1.0/servers?offset=2') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [2, 3, 4]) + + req = webob.Request.blank('/v1.0/servers?offset=aaa') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue('offset' in res.body) + + def test_get_servers_with_limit_and_offset(self): + req = webob.Request.blank('/v1.0/servers?limit=2&offset=1') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [1, 2]) + def test_create_instance(self): def instance_create(context, inst): return {'id': '1', 'display_name': ''} -- cgit From 0abd5bfecd279272e5fe1b0de04478909cd77010 Mon Sep 17 00:00:00 2001 From: Ricardo Carrillo Cruz Date: Mon, 7 Mar 2011 22:18:15 +0100 Subject: added network_get_by_cidr method to nova.db api --- bin/nova-manage | 9 +-------- nova/db/api.py | 7 +++++++ nova/db/sqlalchemy/api.py | 18 ++++++++++++++++++ 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 9557f2423..b274c5bd1 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -545,16 +545,9 @@ class NetworkCommands(object): network.dhcp_start, network.dns) - def delete(self, fixed_range): """Deletes a network""" - try: - network = [n for n in db.network_get_all(context.get_admin_context()) - if n.cidr == fixed_range][0] - - print network.id, network.cidr, network.project_id - except IndexError: - raise ValueError(_("Network does not exist")) + print db.network_get_by_cidr(context.get_admin_context(), fixed_range) class ServiceCommands(object): """Enable and disable running services""" diff --git a/nova/db/api.py b/nova/db/api.py index d23f14a3c..c73796487 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -459,6 +459,10 @@ def network_associate(context, project_id): """Associate a free network to a project.""" return IMPL.network_associate(context, project_id) +def network_is_associated(context, project_id): + """Returns true the the network is associated to a project""" + return IMPL.network_is_associated(context, project_id) + def network_count(context): """Return the number of networks.""" @@ -525,6 +529,9 @@ def network_get_by_bridge(context, bridge): """Get a network by bridge or raise if it does not exist.""" return IMPL.network_get_by_bridge(context, bridge) +def network_get_by_cidr(context, cidr): + """Get a network by cidr 
or raise if it does not exist""" + return IMPL.network_get_by_cidr(context, cidr) def network_get_by_instance(context, instance_id): """Get a network by instance id or raise if it does not exist.""" diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 919dda118..bd2de70c7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -993,6 +993,13 @@ def network_associate(context, project_id): return network_ref +@require_admin_context +def network_is_associated(context, project_id): + session = get_session() + network = session.query(models.Network.project_id).filter(project_id=1).first() + print network + + @require_admin_context def network_count(context): session = get_session() @@ -1116,6 +1123,17 @@ def network_get_by_bridge(context, bridge): return result +@require_admin_context +def network_get_by_cidr(context, cidr): + session = get_session() + result = session.query(models.Network).\ + filter_by(cidr=cidr).first() + + if not result: + raise exception.NotFound(_('Network with cidr %s does not exist') % + cidr) + return result.id + @require_admin_context def network_get_by_instance(_context, instance_id): session = get_session() -- cgit From 56ee811efd52d0971d7fea4c232a904b3ee78ac6 Mon Sep 17 00:00:00 2001 From: Ricardo Carrillo Cruz Date: Mon, 7 Mar 2011 22:37:26 +0100 Subject: deleted network_is_associated from nova.db api --- bin/nova-manage | 4 ++-- nova/db/api.py | 5 ----- nova/db/sqlalchemy/api.py | 9 +-------- 3 files changed, 3 insertions(+), 15 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b274c5bd1..94b0d5946 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -547,8 +547,8 @@ class NetworkCommands(object): def delete(self, fixed_range): """Deletes a network""" - print db.network_get_by_cidr(context.get_admin_context(), fixed_range) - + network = db.network_get_by_cidr(context.get_admin_context(), fixed_range) + class ServiceCommands(object): """Enable and disable running services""" diff --git a/nova/db/api.py b/nova/db/api.py index c73796487..04f5fd72f 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -459,11 +459,6 @@ def network_associate(context, project_id): """Associate a free network to a project.""" return IMPL.network_associate(context, project_id) -def network_is_associated(context, project_id): - """Returns true the the network is associated to a project""" - return IMPL.network_is_associated(context, project_id) - - def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bd2de70c7..c8f42425d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -993,13 +993,6 @@ def network_associate(context, project_id): return network_ref -@require_admin_context -def network_is_associated(context, project_id): - session = get_session() - network = session.query(models.Network.project_id).filter(project_id=1).first() - print network - - @require_admin_context def network_count(context): session = get_session() @@ -1132,7 +1125,7 @@ def network_get_by_cidr(context, cidr): if not result: raise exception.NotFound(_('Network with cidr %s does not exist') % cidr) - return result.id + return result @require_admin_context def network_get_by_instance(_context, instance_id): -- cgit From f79220a1f6a12621463b410d26e31e29a9e6ea3e Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 7 Mar 2011 15:41:37 -0600 Subject: cleaned up virt.xenapi.vmops._get_vm_opaque_ref. 
more reliable approach to checking if param is an opaque ref. code is cleaner --- nova/virt/xenapi/vmops.py | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b862c9de9..b1671fde4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -188,30 +188,32 @@ class VMOps(object): """Refactored out the common code of many methods that receive either a vm name or a vm instance, and want a vm instance in return. """ - vm = None - try: - if instance_or_vm.startswith("OpaqueRef:"): - # Got passed an opaque ref; return it + # if instance_or_vm is a string it must be opaque ref or instance name + if isinstance(instance_or_vm, str): + vm_rec = self._session.get_xenapi().VM.get_record(instance_or_vm) + if vm_rec != None: + # an opaque ref was passed in, return it return instance_or_vm else: - # Must be the instance name + # it must be an instance name instance_name = instance_or_vm - except (AttributeError, KeyError): - # Note the the KeyError will only happen with fakes.py - # Not a string; must be an ID or a vm instance - if isinstance(instance_or_vm, (int, long)): - ctx = context.get_admin_context() - try: - instance_obj = db.instance_get(ctx, instance_or_vm) - instance_name = instance_obj.name - except exception.NotFound: - # The unit tests screw this up, as they use an integer for - # the vm name. I'd fix that up, but that's a matter for - # another bug report. So for now, just try with the passed - # value - instance_name = instance_or_vm - else: - instance_name = instance_or_vm.name + + # if instance_or_vm is an int/long it must be instance id + elif isinstance(instance_or_vm, (int, long)): + ctx = context.get_admin_context() + try: + instance_obj = db.instance_get(ctx, instance_or_vm) + instance_name = instance_obj.name + except exception.NotFound: + # The unit tests screw this up, as they use an integer for + # the vm name. I'd fix that up, but that's a matter for + # another bug report. 
So for now, just try with the passed + # value + instance_name = instance_or_vm + + # otherwise instance_or_vm is an instance object + else: + instance_name = instance_or_vm.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise exception.NotFound( -- cgit From 59f73e3180731cec644b590d448e0da74711ae03 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 7 Mar 2011 16:11:10 -0600 Subject: virt.xenapi.vmops._get_vm_opaque_ref exception caught properly --- nova/virt/xenapi/vmops.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b1671fde4..ae4609418 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -190,13 +190,16 @@ class VMOps(object): """ # if instance_or_vm is a string it must be opaque ref or instance name if isinstance(instance_or_vm, str): - vm_rec = self._session.get_xenapi().VM.get_record(instance_or_vm) - if vm_rec != None: - # an opaque ref was passed in, return it - return instance_or_vm - else: - # it must be an instance name - instance_name = instance_or_vm + ref = None + try: + ref = self._session.get_xenapi().VM.get_record(instance_or_vm) + if ref != None: + # an opaque ref was passed in, return it + return instance_or_vm + except: + pass + # wasn't an opaque ref, must be an instance name + instance_name = instance_or_vm # if instance_or_vm is an int/long it must be instance id elif isinstance(instance_or_vm, (int, long)): -- cgit From 3fc6b8cbbd1be5baffc300112a0e39a807209c36 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 7 Mar 2011 16:34:59 -0600 Subject: virt.xenapi.vmops._get_vm_opaque_ref checks for basestring instance instead of str --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index ae4609418..30fa5bdd7 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -189,7 +189,7 @@ class VMOps(object): a vm name or a vm instance, and want a vm instance in return. 
""" # if instance_or_vm is a string it must be opaque ref or instance name - if isinstance(instance_or_vm, str): + if isinstance(instance_or_vm, basestring): ref = None try: ref = self._session.get_xenapi().VM.get_record(instance_or_vm) -- cgit From 88c5555e867c730065c18541a35b161eb861b502 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Mon, 7 Mar 2011 16:40:19 -0600 Subject: First part of the bug fix --- nova/compute/manager.py | 4 +--- nova/virt/xenapi/vmops.py | 8 +++++++- nova/virt/xenapi_conn.py | 9 +++++++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b3e864154..b35216dd3 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -510,9 +510,7 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, migration_ref['instance_id']) - # this may get passed into the following spawn instead - new_disk_info = self.driver.attach_disk(instance_ref, disk_info) - self.driver.spawn(instance_ref, disk=new_disk_info) + self.driver.finish_resize(instance_ref, disk_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b862c9de9..37f513599 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -72,7 +72,13 @@ class VMOps(object): LOG.debug(_("Starting instance %s"), instance.name) self._session.call_xenapi('VM.start', vm_ref, False, False) - def spawn(self, instance, disk): + def spawn(self, instance): + self._spawn(instance, disk=None) + + def spawn_with_disk(self, instance, disk): + self._spawn(instance, disk=disk) + + def _spawn(self, instance, disk): """Create VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 62e17e851..7e8f825e9 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -154,9 +154,14 @@ class XenAPIConnection(object): """List VM instances""" return self._vmops.list_instances() - def spawn(self, instance, disk=None): + def spawn(self, instance): """Create VM instance""" - self._vmops.spawn(instance, disk) + self._vmops.spawn(instance) + + def finish_resize(self, instance, disk_info) + """Completes a resize, turning on the migrated instance""" + new_disk_info = self.attach_disk(instance, disk_info) + self._vmops.spawn_with_disk(instance, new_disk_info) def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ -- cgit From ede88283729663f11d913cc54bcf8ee08028d98f Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 7 Mar 2011 14:42:36 -0800 Subject: A few formatting niceties --- nova/service.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nova/service.py b/nova/service.py index 8fdaca0a5..389a6b2df 100644 --- a/nova/service.py +++ b/nova/service.py @@ -42,6 +42,7 @@ from nova import utils from nova import version from nova import wsgi + FLAGS = flags.FLAGS flags.DEFINE_integer('report_interval', 10, 'seconds between nodes reporting state to datastore', @@ -271,6 +272,11 @@ def serve(*services): x.start() +def wait(): + while True: + greenthread.sleep(5) + + def serve_wsgi(cls, conf): try: service = cls.create(conf) @@ -290,11 +296,6 @@ def serve_wsgi(cls, conf): return service -def wait(): - while True: - greenthread.sleep(5) - - def _run_wsgi(paste_config_file, apis): logging.debug(_("Using paste.deploy config at: %s"), paste_config_file) apps = [] 
-- cgit From 5c7ee13b058fb954fd9bbc4a3550716b8faa0b97 Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Mon, 7 Mar 2011 22:50:35 +0000 Subject: And unit tests --- nova/tests/test_xenapi.py | 5 +++++ nova/tests/xenapi/stubs.py | 4 ++++ nova/virt/xenapi_conn.py | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 7f437c2b8..6e458558d 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -383,6 +383,11 @@ class XenAPIMigrateInstance(test.TestCase): conn = xenapi_conn.get_connection(False) conn.attach_disk(instance, {'base_copy': 'hurr', 'cow': 'durr'}) + def test_finish_resize(self): + instance = db.instance_create(self.values) + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + conn = xenapi_conn.get_connection(False) + conn.finish_resize(instance, dict(base_copy='hurr', cow='durr')) class XenAPIDetermineDiskImageTestCase(test.TestCase): """ diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 11e89c9b4..28037c2ba 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -251,6 +251,9 @@ def stub_out_migration_methods(stubs): def fake_destroy(*args, **kwargs): pass + def fake_spawn_with_disk(*args, **kwargs): + pass + stubs.Set(vmops.VMOps, '_destroy', fake_destroy) stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr) stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr) @@ -259,3 +262,4 @@ def stub_out_migration_methods(stubs): stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None) stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path) stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown) + stubs.Set(vmops.VMOps, 'spawn_with_disk', fake_spawn_with_disk) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 7e8f825e9..3991496b2 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -158,7 +158,7 @@ class XenAPIConnection(object): """Create VM instance""" self._vmops.spawn(instance) - def finish_resize(self, instance, disk_info) + def finish_resize(self, instance, disk_info): """Completes a resize, turning on the migrated instance""" new_disk_info = self.attach_disk(instance, disk_info) self._vmops.spawn_with_disk(instance, new_disk_info) -- cgit From 2f0845b7b80081d18ee268b94fe38326f3c5401e Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Mon, 7 Mar 2011 23:07:05 +0000 Subject: A few more changes --- nova/tests/test_xenapi.py | 6 ------ nova/virt/xenapi/vmops.py | 10 +++++----- nova/virt/xenapi_conn.py | 9 +++------ 3 files changed, 8 insertions(+), 17 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 6e458558d..f5b154a51 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -377,12 +377,6 @@ class XenAPIMigrateInstance(test.TestCase): conn = xenapi_conn.get_connection(False) conn.migrate_disk_and_power_off(instance, '127.0.0.1') - def test_attach_disk(self): - instance = db.instance_create(self.values) - stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) - conn = xenapi_conn.get_connection(False) - conn.attach_disk(instance, {'base_copy': 'hurr', 'cow': 'durr'}) - def test_finish_resize(self): instance = db.instance_create(self.values) stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 37f513599..e658de7f3 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ 
-75,8 +75,8 @@ class VMOps(object): def spawn(self, instance): self._spawn(instance, disk=None) - def spawn_with_disk(self, instance, disk): - self._spawn(instance, disk=disk) + def spawn_with_disk(self, instance, vdi_uuid): + self._spawn(instance, disk=vdi_uuid) def _spawn(self, instance, disk): """Create VM instance""" @@ -343,14 +343,14 @@ class VMOps(object): # sensible so we don't need to blindly pass around dictionaries return {'base_copy': base_copy_uuid, 'cow': cow_uuid} - def attach_disk(self, instance, disk_info): + def attach_disk(self, instance, base_copy_uuid, cow_uuid): """Links the base copy VHD to the COW via the XAPI plugin""" vm_ref = VMHelper.lookup(self._session, instance.name) new_base_copy_uuid = str(uuid.uuid4()) new_cow_uuid = str(uuid.uuid4()) params = {'instance_id': instance.id, - 'old_base_copy_uuid': disk_info['base_copy'], - 'old_cow_uuid': disk_info['cow'], + 'old_base_copy_uuid': base_copy_uuid, + 'old_cow_uuid': cow_uuid, 'new_base_copy_uuid': new_base_copy_uuid, 'new_cow_uuid': new_cow_uuid, 'sr_path': VMHelper.get_sr_path(self._session), } diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 3991496b2..9965accad 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -160,8 +160,9 @@ class XenAPIConnection(object): def finish_resize(self, instance, disk_info): """Completes a resize, turning on the migrated instance""" - new_disk_info = self.attach_disk(instance, disk_info) - self._vmops.spawn_with_disk(instance, new_disk_info) + cow_uuid = self._vmops.attach_disk(instance, disk_info['base_copy'], + disk_info['cow']) + self._vmops.spawn_with_disk(instance, cow_uuid) def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ @@ -202,10 +203,6 @@ class XenAPIConnection(object): off the instance copies over the COW disk""" return self._vmops.migrate_disk_and_power_off(instance, dest) - def attach_disk(self, instance, disk_info): - """Moves the copied VDIs into the SR""" - return self._vmops.attach_disk(instance, disk_info) - def suspend(self, instance, callback): """suspend the specified instance""" self._vmops.suspend(instance, callback) -- cgit From 8e0fd37ddfbe88df296cf45583f0b3e4fa4d7a75 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 7 Mar 2011 15:22:59 -0800 Subject: Converted tabs to spaces in bin/nova-api --- bin/nova-api | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-api b/bin/nova-api index 2d2ef6d0c..c921ec45c 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -62,5 +62,5 @@ if __name__ == '__main__': LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf') sys.exit(1) else: - service = service.serve_wsgi(service.ApiService, conf) + service = service.serve_wsgi(service.ApiService, conf) service.wait() -- cgit From e69c802aaf40f3b90789aeef8bf3ef5dcbbcb2f3 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 7 Mar 2011 15:36:04 -0800 Subject: Moved FLAGS.paste_config to its re-usable location --- bin/nova-api | 14 +++----------- nova/service.py | 10 +++++++--- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index f48dbe5a5..85ca4eefd 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -45,9 +45,6 @@ from nova import wsgi LOG = logging.getLogger('nova.api') FLAGS = flags.FLAGS -flags.DEFINE_string('paste_config', "api-paste.ini", - 'File name for the paste.deploy config for nova-api') - if __name__ == '__main__': utils.default_flagfile() @@ -59,11 +56,6 @@ if __name__ == '__main__': 
for flag in FLAGS: flag_get = FLAGS.get(flag, None) LOG.debug("%(flag)s : %(flag_get)s" % locals()) - conf = wsgi.paste_config_file(FLAGS.paste_config) - if not conf: - LOG.error(_("No paste configuration found for: %s"), - FLAGS.paste_config) - sys.exit(1) - else: - service = service.serve_wsgi(service.ApiService, conf) - service.wait() + + service = service.serve_wsgi(service.ApiService) + service.wait() diff --git a/nova/service.py b/nova/service.py index 389a6b2df..5a8d58695 100644 --- a/nova/service.py +++ b/nova/service.py @@ -56,6 +56,8 @@ flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') flags.DEFINE_string('osapi_listen', "0.0.0.0", 'IP address for OpenStack API to listen') flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') +flags.DEFINE_string('paste_config', "api-paste.ini", + 'File name for the paste.deploy config for nova-api') class Service(object): @@ -238,9 +240,11 @@ class ApiService(WsgiService): @classmethod def create(cls, conf=None): if not conf: - conf = wsgi.paste_config_file('nova-api.conf') + conf = wsgi.paste_config_file(FLAGS.paste_config) if not conf: - raise exception.Error(_("Cannot load nova-api.conf")) + message = (_("No paste configuration found for: %s"), + FLAGS.paste_config) + raise exception.Error(message) api_endpoints = ['ec2', 'osapi'] service = cls(conf, api_endpoints) return service @@ -277,7 +281,7 @@ def wait(): greenthread.sleep(5) -def serve_wsgi(cls, conf): +def serve_wsgi(cls, conf=None): try: service = cls.create(conf) except Exception: -- cgit From e39995def6a2a11cdd430b0e6f603b493be5542b Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Mon, 7 Mar 2011 23:51:20 +0000 Subject: Some more refactoring and a tighter unit test --- nova/tests/test_xenapi.py | 14 ++++++++++---- nova/tests/xenapi/stubs.py | 15 +++++++++++++-- nova/virt/xenapi/vmops.py | 30 ++++++++++++++---------------- nova/virt/xenapi_conn.py | 4 ++-- 4 files changed, 39 insertions(+), 24 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index f5b154a51..919a38c06 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -360,16 +360,22 @@ class XenAPIMigrateInstance(test.TestCase): db_fakes.stub_out_db_instance_api(self.stubs) stubs.stub_out_get_target(self.stubs) xenapi_fake.reset() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') self.values = {'name': 1, 'id': 1, - 'project_id': 'fake', - 'user_id': 'fake', + 'project_id': self.project.id, + 'user_id': self.user.id, 'image_id': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, + 'kernel_id': None, + 'ramdisk_id': None, 'instance_type': 'm1.large', 'mac_address': 'aa:bb:cc:dd:ee:ff', } stubs.stub_out_migration_methods(self.stubs) + glance_stubs.stubout_glance_client(self.stubs, + glance_stubs.FakeGlance) def test_migrate_disk_and_power_off(self): instance = db.instance_create(self.values) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 28037c2ba..d8e358611 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -225,6 +225,17 @@ class FakeSessionForMigrationTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForMigrationTests, self).__init__(uri) + def VDI_get_by_uuid(*args): + return 'hurr' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise 
fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + vm['is_a_template'] = False + vm['is_control_domain'] = False def stub_out_migration_methods(stubs): def fake_get_snapshot(self, instance): @@ -251,7 +262,7 @@ def stub_out_migration_methods(stubs): def fake_destroy(*args, **kwargs): pass - def fake_spawn_with_disk(*args, **kwargs): + def fake_reset_network(*args, **kwargs): pass stubs.Set(vmops.VMOps, '_destroy', fake_destroy) @@ -261,5 +272,5 @@ def stub_out_migration_methods(stubs): stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi) stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None) stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path) + stubs.Set(vmops.VMOps, 'reset_network', fake_reset_network) stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown) - stubs.Set(vmops.VMOps, 'spawn_with_disk', fake_spawn_with_disk) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index e658de7f3..7fe1f6ff0 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -72,13 +72,19 @@ class VMOps(object): LOG.debug(_("Starting instance %s"), instance.name) self._session.call_xenapi('VM.start', vm_ref, False, False) - def spawn(self, instance): - self._spawn(instance, disk=None) - - def spawn_with_disk(self, instance, vdi_uuid): - self._spawn(instance, disk=vdi_uuid) + def create_disk(self, instance): + user = AuthManager().get_user(instance.user_id) + project = AuthManager().get_project(instance.project_id) + disk_image_type = VMHelper.determine_disk_image_type(instance) + vdi_uuid = VMHelper.fetch_image(self._session, instance.id, + instance.image_id, user, project, disk_image_type) + return vdi_uuid - def _spawn(self, instance, disk): + def spawn(self, instance): + vdi_uuid = self.create_disk(instance) + self._spawn_with_disk(instance, vdi_uuid=vdi_uuid) + + def _spawn_with_disk(self, instance, vdi_uuid): """Create VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) @@ -101,17 +107,9 @@ class VMOps(object): vdi_ref = kernel = ramdisk = pv_kernel = None # Are we building from a pre-existing disk? 
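Taken together with the finish_resize change earlier in the series, the create_disk / spawn / _spawn_with_disk split gives two callers of the same low-level spawn: a fresh boot fetches a new VDI from Glance first, while a completed resize re-links the migrated base-copy/COW pair and boots from that. A condensed sketch of the two paths, assuming a vmops object exposing the methods named in these diffs; this is a reading aid, not nova's literal call graph:

    def boot_fresh(vmops, instance):
        # Normal boot: pull the image into a new VDI, then build the VM on it.
        vdi_uuid = vmops.create_disk(instance)
        vmops._spawn_with_disk(instance, vdi_uuid=vdi_uuid)

    def finish_resize(vmops, instance, disk_info):
        # Resize completion: re-link the copied VHDs, then boot from the result.
        vdi_uuid = vmops.attach_disk(instance, disk_info['base_copy'],
                                     disk_info['cow'])
        vmops._spawn_with_disk(instance, vdi_uuid)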
- if not disk: - #if kernel is not present we must download a raw disk - - disk_image_type = VMHelper.determine_disk_image_type(instance) - vdi_uuid = VMHelper.fetch_image(self._session, instance.id, - instance.image_id, user, project, disk_image_type) - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - - else: - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + disk_image_type = VMHelper.determine_disk_image_type(instance) if disk_image_type == ImageType.DISK_RAW: # Have a look at the VDI and see if it has a PV kernel pv_kernel = VMHelper.lookup_image(self._session, instance.id, diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 9965accad..b63a5f8c3 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -160,9 +160,9 @@ class XenAPIConnection(object): def finish_resize(self, instance, disk_info): """Completes a resize, turning on the migrated instance""" - cow_uuid = self._vmops.attach_disk(instance, disk_info['base_copy'], + vdi_uuid = self._vmops.attach_disk(instance, disk_info['base_copy'], disk_info['cow']) - self._vmops.spawn_with_disk(instance, cow_uuid) + self._vmops._spawn_with_disk(instance, vdi_uuid) def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ -- cgit From 5ec9cbcdee3de3868a47ca5ec351a9a2594ceea2 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 7 Mar 2011 18:05:27 -0600 Subject: virt.xenapi.vmops._get_vm_opaque_ref assumes VM.get_record raises --- nova/virt/xenapi/vmops.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 30fa5bdd7..c0fbf96fc 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -192,14 +192,12 @@ class VMOps(object): if isinstance(instance_or_vm, basestring): ref = None try: + # check for opaque ref ref = self._session.get_xenapi().VM.get_record(instance_or_vm) - if ref != None: - # an opaque ref was passed in, return it - return instance_or_vm - except: - pass - # wasn't an opaque ref, must be an instance name - instance_name = instance_or_vm + return instance_or_vm + except self.XenAPI.Failure: + # wasn't an opaque ref, must be an instance name + instance_name = instance_or_vm # if instance_or_vm is an int/long it must be instance id elif isinstance(instance_or_vm, (int, long)): -- cgit From 4e8b6a14324ef2d1f550233cbcfc94c6363533d8 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 7 Mar 2011 18:46:44 -0600 Subject: virt.xenapi.vmops._get_vm_opaque_ref changed vm to vm_ref and ref to obj --- nova/virt/xenapi/vmops.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c0fbf96fc..0adabe7f5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -190,10 +190,10 @@ class VMOps(object): """ # if instance_or_vm is a string it must be opaque ref or instance name if isinstance(instance_or_vm, basestring): - ref = None + obj = None try: # check for opaque ref - ref = self._session.get_xenapi().VM.get_record(instance_or_vm) + obj = self._session.get_xenapi().VM.get_record(instance_or_vm) return instance_or_vm except self.XenAPI.Failure: # wasn't an opaque ref, must be an instance name @@ -215,11 +215,11 @@ class VMOps(object): # otherwise instance_or_vm is an instance object else: instance_name = instance_or_vm.name - vm = VMHelper.lookup(self._session, instance_name) - if 
vm is None: + vm_ref = VMHelper.lookup(self._session, instance_name) + if vm_ref is None: raise exception.NotFound( _('Instance not present %s') % instance_name) - return vm + return vm_ref def _acquire_bootlock(self, vm): """Prevent an instance from booting""" -- cgit From cac5881eaa35f94e004c18dd34ca78014f067976 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Tue, 8 Mar 2011 01:01:41 -0500 Subject: execvp --- nova/crypto.py | 32 +-- nova/network/linux_net.py | 222 +++++++++++---------- nova/tests/test_network.py | 16 +- nova/utils.py | 16 +- nova/virt/disk.py | 44 ++-- nova/virt/images.py | 5 +- nova/virt/libvirt_conn.py | 36 ++-- nova/virt/xenapi/vm_utils.py | 11 +- nova/volume/driver.py | 71 +++---- .../networking/etc/xensource/scripts/vif_rules.py | 91 ++++++--- 10 files changed, 296 insertions(+), 248 deletions(-) diff --git a/nova/crypto.py b/nova/crypto.py index b240a3958..dd24723b8 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -105,8 +105,10 @@ def generate_key_pair(bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.join(tmpdir, 'temp') - utils.execute('ssh-keygen','-q','-b',"%d" % bits,'-N','""','-f',keyfile) - (out, err) = utils.execute('ssh-keygen','-q','-l','-f',"%s.pub" % (keyfile)) + utils.execute('ssh-keygen', '-q', '-b', '%d' % bits, '-N', '', + '-f', keyfile) + (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', + '%s.pub' % (keyfile)) fingerprint = out.split(' ')[1] private_key = open(keyfile).read() public_key = open(keyfile + '.pub').read() @@ -118,7 +120,7 @@ def generate_key_pair(bits=1024): # bio = M2Crypto.BIO.MemoryBuffer() # key.save_pub_key_bio(bio) # public_key = bio.read() - # public_key, err = execute('ssh-keygen','-y','-f','/dev/stdin', private_key) + # public_key, err = execute('ssh-keygen', '-y', '-f', '/dev/stdin', private_key) return (private_key, public_key, fingerprint) @@ -143,9 +145,10 @@ def revoke_cert(project_id, file_name): start = os.getcwd() os.chdir(ca_folder(project_id)) # NOTE(vish): potential race condition here - utils.execute('openssl','ca','-config','./openssl.cnf','-revoke',"'%s'" % file_name) - utils.execute('openssl','ca','-gencrl','-config','./openssl.cnf','-out',"'%s'" % - FLAGS.crl_file) + utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke', + '%s' % file_name) + utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf', + '-out', '%s' % FLAGS.crl_file) os.chdir(start) @@ -193,8 +196,9 @@ def generate_x509_cert(user_id, project_id, bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.join(tmpdir, 'temp.csr') - utils.execute('openssl','genrsa','-out',keyfile,bits) - utils.execute('openssl','req','-new','-key',keyfile,'-out',csrfile,'-batch','-subj',subject) + utils.execute('openssl', 'genrsa', '-out', keyfile, bits) + utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile, + '-batch', '-subj', subject) private_key = open(keyfile).read() csr = open(csrfile).read() shutil.rmtree(tmpdir) @@ -211,7 +215,8 @@ def _ensure_project_folder(project_id): if not os.path.exists(ca_path(project_id)): start = os.getcwd() os.chdir(ca_folder()) - utils.execute('sh','geninter.sh',project_id, _project_cert_subject(project_id)) + utils.execute('sh', 'geninter.sh', project_id, + _project_cert_subject(project_id)) os.chdir(start) @@ -226,7 +231,7 @@ def generate_vpn_files(project_id): start = os.getcwd() os.chdir(ca_folder()) # TODO(vish): the shell scripts could all be done in python - utils.execute('sh','genvpn.sh', + 
utils.execute('sh', 'genvpn.sh', project_id, _vpn_cert_subject(project_id)) with open(csr_fn, "r") as csrfile: csr_text = csrfile.read() @@ -257,9 +262,10 @@ def _sign_csr(csr_text, ca_folder): start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) - utils.execute('openssl','ca','-batch','-out',outbound,'-config' - './openssl.cnf','-infiles',inbound) - out, _err = utils.execute('openssl','x509','-in',outbound','-serial','-noout') + utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config', + './openssl.cnf', '-infiles', inbound) + out, _err = utils.execute('openssl', 'x509', '-in', outbound, + '-serial', '-noout') serial = out.rpartition("=")[2] os.chdir(start) with open(outbound, "r") as crtfile: diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 535ce87bc..ad019a8c0 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -65,113 +65,117 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', def metadata_forward(): """Create forwarding rule for metadata""" - _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " - "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) + _confirm_rule("PREROUTING", '-t', 'nat', '-s', '0.0.0.0/0', + '-d', '169.254.169.254/32', '-p', 'tcp', '-m', 'tcp', + '--dport', '80', '-j', 'DNAT', + '--to-destination', '%s:%s' % (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) def init_host(): """Basic networking setup goes here""" if FLAGS.use_nova_chains: - _execute("sudo iptables -N nova_input", check_exit_code=False) - _execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain, + _execute('sudo', 'iptables', '-N', 'nova_input', check_exit_code=False) + _execute('sudo', 'iptables', '-D', FLAGS.input_chain, + '-j', 'nova_input', check_exit_code=False) - _execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain) - - _execute("sudo iptables -N nova_forward", check_exit_code=False) - _execute("sudo iptables -D FORWARD -j nova_forward", + _execute('sudo', 'iptables', '-A', FLAGS.input_chain, + '-j', 'nova_input') + _execute('sudo', 'iptables', '-N', 'nova_forward', check_exit_code=False) - _execute("sudo iptables -A FORWARD -j nova_forward") - - _execute("sudo iptables -N nova_output", check_exit_code=False) - _execute("sudo iptables -D OUTPUT -j nova_output", + _execute('sudo', 'iptables', '-D', 'FORWARD', '-j', 'nova_forward', check_exit_code=False) - _execute("sudo iptables -A OUTPUT -j nova_output") - - _execute("sudo iptables -t nat -N nova_prerouting", + _execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward') + _execute('sudo', 'iptables', '-N', 'nova_output', check_exit_code=False) + _execute('sudo', 'iptables', '-D', 'OUTPUT', '-j', 'nova_output', check_exit_code=False) - _execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting", + _execute('sudo', 'iptables', '-A', 'OUTPUT', '-j', 'nova_output') + _execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_prerouting', check_exit_code=False) - _execute("sudo iptables -t nat -A PREROUTING -j nova_prerouting") - - _execute("sudo iptables -t nat -N nova_postrouting", + _execute('sudo', 'iptables', '-t', 'nat', '-D', 'PREROUTING', + '-j', 'nova_prerouting', check_exit_code=False) + _execute('sudo', 'iptables', '-t', 'nat', '-A', 'PREROUTING', + '-j', 'nova_prerouting') + _execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_postrouting', check_exit_code=False) - _execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting", + _execute('sudo', 'iptables', '-t', 'nat', '-D', 
'POSTROUTING', + '-j', 'nova_postrouting', check_exit_code=False) + _execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING', + '-j', 'nova_postrouting') + _execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_snatting', check_exit_code=False) - _execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting") - - _execute("sudo iptables -t nat -N nova_snatting", + _execute('sudo', 'iptables', '-t', 'nat', '-D', 'POSTROUTING', + '-j nova_snatting', check_exit_code=False) + _execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING', + '-j', 'nova_snatting') + _execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_output', check_exit_code=False) - _execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting", - check_exit_code=False) - _execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting") - - _execute("sudo iptables -t nat -N nova_output", check_exit_code=False) - _execute("sudo iptables -t nat -D OUTPUT -j nova_output", - check_exit_code=False) - _execute("sudo iptables -t nat -A OUTPUT -j nova_output") + _execute('sudo', 'iptables', '-t', 'nat', '-D', 'OUTPUT', + '-j nova_output', check_exit_code=False) + _execute('sudo', 'iptables', '-t', 'nat', '-A', 'OUTPUT', + '-j', 'nova_output') else: # NOTE(vish): This makes it easy to ensure snatting rules always # come after the accept rules in the postrouting chain - _execute("sudo iptables -t nat -N SNATTING", - check_exit_code=False) - _execute("sudo iptables -t nat -D POSTROUTING -j SNATTING", + _execute('sudo', 'iptables', '-t', 'nat', '-N', 'SNATTING', check_exit_code=False) - _execute("sudo iptables -t nat -A POSTROUTING -j SNATTING") + _execute('sudo', 'iptables', '-t', 'nat', '-D', 'POSTROUTING', + '-j', 'SNATTING', check_exit_code=False) + _execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING', + '-j', 'SNATTING') # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. 
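The mechanical rule behind this whole execvp sweep is that every "sudo foo %s" % arg shell string becomes a tuple of argv elements, so nothing is re-parsed by a shell and arguments containing spaces or metacharacters cannot be reinterpreted. Below is a minimal stand-alone version of the wrapper style the branch is converging on; the name execute and the keyword handling mirror the diffs, but the body is a simplified sketch rather than nova's utils.execute. Note that subprocess.Popen takes the argument sequence itself, not the unpacked arguments.

    import subprocess

    def execute(*cmd, **kwargs):
        """Run cmd directly via execvp, with no shell involved."""
        process_input = kwargs.get('process_input', None)
        check_exit_code = kwargs.get('check_exit_code', True)
        argv = map(str, cmd)   # callers above pass ints such as ports and pids
        proc = subprocess.Popen(argv, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate(process_input)
        if check_exit_code and proc.returncode != 0:
            raise RuntimeError('%r failed (%d): %s'
                               % (argv, proc.returncode, err))
        return out, err

    # e.g. execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward')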
- _confirm_rule("SNATTING", "-t nat -s %s " - "-j SNAT --to-source %s" - % (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True) + _confirm_rule("SNATTING", '-t', 'nat', '-s', FLAGS.fixed_range, + '-j', 'SNAT', '--to-source', FLAGS.routing_source_ip, + append=True) - _confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" % - (FLAGS.fixed_range, FLAGS.dmz_cidr)) - _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" % - {'range': FLAGS.fixed_range}) + _confirm_rule("POSTROUTING", '-t', 'nat', '-s', FLAGS.fixed_range, + '-d', FLAGS.dmz_cidr, '-j', 'ACCEPT') + _confirm_rule("POSTROUTING", '-t', 'nat', '-s', FLAGS.fixed_range, + '-d', FLAGS.fixed_range, '-j', 'ACCEPT') def bind_floating_ip(floating_ip, check_exit_code=True): """Bind ip to public interface""" - _execute("sudo ip addr add %s dev %s" % (floating_ip, - FLAGS.public_interface), + _execute('sudo', 'ip', 'addr', 'add', floating_ip, + 'dev', FLAGS.public_interface), check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): """Unbind a public ip from public interface""" - _execute("sudo ip addr del %s dev %s" % (floating_ip, - FLAGS.public_interface)) + _execute('sudo', 'ip', 'addr', 'del', floating_ip, + 'dev', FLAGS.public_interface)) def ensure_vlan_forward(public_ip, port, private_ip): """Sets up forwarding rules for vlan""" - _confirm_rule("FORWARD", "-d %s -p udp --dport 1194 -j ACCEPT" % - private_ip) - _confirm_rule("PREROUTING", - "-t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" - % (public_ip, port, private_ip)) + _confirm_rule("FORWARD", '-d', private_ip, '-p', 'udp', + '--dport', '1194', '-j', 'ACCEPT') + _confirm_rule("PREROUTING", '-t', 'nat', '-d', public_ip, '-p', 'udp', + '--dport', port, '-j', 'DNAT', '--to', '%s:1194' + % private_ip) def ensure_floating_forward(floating_ip, fixed_ip): """Ensure floating ip forwarding rule""" - _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" - % (floating_ip, fixed_ip)) - _confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s" - % (floating_ip, fixed_ip)) - _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" - % (fixed_ip, floating_ip)) + _confirm_rule("PREROUTING", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT', + '--to', fixed_ip) + _confirm_rule("OUTPUT", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT', + '--to', fixed_ip) + _confirm_rule("SNATTING", '-t', 'nat', '-s', fixed_ip, '-j', 'SNAT', + '--to', floating_ip) def remove_floating_forward(floating_ip, fixed_ip): """Remove forwarding for floating ip""" - _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" - % (floating_ip, fixed_ip)) - _remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s" - % (floating_ip, fixed_ip)) - _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" - % (fixed_ip, floating_ip)) + _remove_rule("PREROUTING", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT', + '--to', fixed_ip) + _remove_rule("OUTPUT", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT', + '--to', fixed_ip) + _remove_rule("SNATTING", '-t', 'nat', '-s', fixed_ip, '-j', 'SNAT', + '--to', floating_ip) def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): @@ -185,9 +189,9 @@ def ensure_vlan(vlan_num): interface = "vlan%s" % vlan_num if not _device_exists(interface): LOG.debug(_("Starting VLAN inteface %s"), interface) - _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") - _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) - _execute("sudo ip link set %s up" % interface) + _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') + _execute('sudo', 
'vconfig', 'add', FLAGS.vlan_interface, vlan_num) + _execute('sudo', 'ip', 'link', 'set', interface, 'up') return interface @@ -206,52 +210,54 @@ def ensure_bridge(bridge, interface, net_attrs=None): """ if not _device_exists(bridge): LOG.debug(_("Starting Bridge interface for %s"), interface) - _execute("sudo brctl addbr %s" % bridge) - _execute("sudo brctl setfd %s 0" % bridge) + _execute('sudo', 'brctl', 'addbr', bridge) + _execute('sudo', 'brctl', 'setfd', bridge, 0) # _execute("sudo brctl setageing %s 10" % bridge) - _execute("sudo brctl stp %s off" % bridge) - _execute("sudo ip link set %s up" % bridge) + _execute('sudo', 'brctl', 'stp', bridge', 'off') + _execute('sudo', 'ip', 'link', 'set', bridge, up) if net_attrs: # NOTE(vish): The ip for dnsmasq has to be the first address on the # bridge for it to respond to reqests properly suffix = net_attrs['cidr'].rpartition('/')[2] - out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" % - (net_attrs['gateway'], - suffix, - net_attrs['broadcast'], - bridge), + out, err = _execute('sudo', 'ip', 'addr', 'add', + "%s/%s" % + (net_attrs['gateway'], suffix), + 'brd', + net-attrs['broadcast'], + 'dev', + bridge, check_exit_code=False) if err and err != "RTNETLINK answers: File exists\n": raise exception.Error("Failed to add ip: %s" % err) if(FLAGS.use_ipv6): - _execute("sudo ip -f inet6 addr change %s dev %s" % - (net_attrs['cidr_v6'], bridge)) + _execute('sudo', 'ip', '-f', 'inet6', 'addr', + 'change', net_attrs['cidr_v6'], + 'dev', bridge) # NOTE(vish): If the public interface is the same as the # bridge, then the bridge has to be in promiscuous # to forward packets properly. if(FLAGS.public_interface == bridge): - _execute("sudo ip link set dev %s promisc on" % bridge) + _execute('sudo', 'ip', 'link', 'set', 'dev', bridge, 'promisc', 'on') if interface: # NOTE(vish): This will break if there is already an ip on the # interface, so we move any ips to the bridge gateway = None - out, err = _execute("sudo route -n") + out, err = _execute('sudo', 'route', '-n') for line in out.split("\n"): fields = line.split() if fields and fields[0] == "0.0.0.0" and fields[-1] == interface: gateway = fields[1] - out, err = _execute("sudo ip addr show dev %s scope global" % - interface) + out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, + 'scope', 'global') for line in out.split("\n"): fields = line.split() if fields and fields[0] == "inet": params = ' '.join(fields[1:-1]) - _execute("sudo ip addr del %s dev %s" % (params, fields[-1])) - _execute("sudo ip addr add %s dev %s" % (params, bridge)) + _execute('sudo', 'ip', 'addr', 'del', params, 'dev', fields[-1]) + _execute('sudo', 'ip', 'addr', 'add', params, 'dev', bridge) if gateway: - _execute("sudo route add 0.0.0.0 gw %s" % gateway) - out, err = _execute("sudo brctl addif %s %s" % - (bridge, interface), + _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway) + out, err = _execute('sudo', 'brctl', 'addif, bridge, interface, check_exit_code=False) if (err and err != "device %s is already a member of a bridge; can't " @@ -259,18 +265,18 @@ def ensure_bridge(bridge, interface, net_attrs=None): raise exception.Error("Failed to add interface: %s" % err) if FLAGS.use_nova_chains: - (out, err) = _execute("sudo iptables -N nova_forward", + (out, err) = _execute('sudo', 'iptables', '-N', 'nova_forward, check_exit_code=False) if err != 'iptables: Chain already exists.\n': # NOTE(vish): chain didn't exist link chain - _execute("sudo iptables -D FORWARD -j nova_forward", + _execute('sudo', 
'iptables, '-D', 'FORWARD', '-j', 'nova_forward', check_exit_code=False) - _execute("sudo iptables -A FORWARD -j nova_forward") + _execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward') - _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge) - _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge) - _execute("sudo iptables -N nova-local", check_exit_code=False) - _confirm_rule("FORWARD", "-j nova-local") + _confirm_rule("FORWARD", '--in-interface', bridge, '-j', 'ACCEPT') + _confirm_rule("FORWARD", '--out-interface', bridge, '-j', 'ACCEPT') + _execute('sudo', 'iptables', '-N', 'nova-local', check_exit_code=False) + _confirm_rule("FORWARD", '-j', 'nova-local') def get_dhcp_hosts(context, network_id): @@ -304,11 +310,11 @@ def update_dhcp(context, network_id): # if dnsmasq is already running, then tell it to reload if pid: - out, _err = _execute('cat /proc/%d/cmdline' % pid, + out, _err = _execute('cat', "/proc/%d/cmdline" % pid, check_exit_code=False) if conffile in out: try: - _execute('sudo kill -HUP %d' % pid) + _execute('sudo', 'kill', '-HUP', pid) return except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("Hupping dnsmasq threw %s"), exc) @@ -349,11 +355,11 @@ interface %s # if radvd is already running, then tell it to reload if pid: - out, _err = _execute('cat /proc/%d/cmdline' + out, _err = _execute('cat', "/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: - _execute('sudo kill %d' % pid) + _execute('sudo', 'kill', pid) except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("killing radvd threw %s"), exc) else: @@ -374,23 +380,23 @@ def _host_dhcp(fixed_ip_ref): fixed_ip_ref['address']) -def _execute(cmd, *args, **kwargs): +def _execute(*cmd, **kwargs): """Wrapper around utils._execute for fake_network""" if FLAGS.fake_network: - LOG.debug("FAKE NET: %s", cmd) + LOG.debug("FAKE NET: %s", ' '.join(cmd)) return "fake", 0 else: - return utils.execute(cmd, *args, **kwargs) + return utils.execute(*cmd, **kwargs) def _device_exists(device): """Check if ethernet device exists""" - (_out, err) = _execute("ip link show dev %s" % device, + (_out, err) = _execute('ip', 'link', 'show', 'dev', device, check_exit_code=False) return not err -def _confirm_rule(chain, cmd, append=False): +def _confirm_rule(chain, *cmd, append=False): """Delete and re-add iptables rule""" if FLAGS.use_nova_chains: chain = "nova_%s" % chain.lower() @@ -398,16 +404,16 @@ def _confirm_rule(chain, cmd, append=False): loc = "-A" else: loc = "-I" - _execute("sudo iptables --delete %s %s" % (chain, cmd), + _execute('sudo', 'iptables', '--delete', chain, *cmd, check_exit_code=False) - _execute("sudo iptables %s %s %s" % (loc, chain, cmd)) + _execute('sudo', 'iptables', loc, chain, *cmd) -def _remove_rule(chain, cmd): +def _remove_rule(chain, *cmd): """Remove iptables rule""" if FLAGS.use_nova_chains: chain = "%s" % chain.lower() - _execute("sudo iptables --delete %s %s" % (chain, cmd)) + _execute('sudo', 'iptables', '--delete', chain, *cmd) def _dnsmasq_cmd(net): @@ -444,7 +450,7 @@ def _stop_dnsmasq(network): if pid: try: - _execute('sudo kill -TERM %d' % pid) + _execute('sudo', 'kill', '-TERM', pid) except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("Killing dnsmasq threw %s"), exc) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index ce1c77210..6d2d8b771 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -343,13 +343,13 @@ def lease_ip(private_ip): private_ip) instance_ref = 
db.fixed_ip_get_instance(context.get_admin_context(), private_ip) - cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) + cmd = (binpath('nova-dhcpbridge'), 'add' + instance_ref['mac_address'], + private_ip, 'fake') env = {'DNSMASQ_INTERFACE': network_ref['bridge'], 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) + (out, err) = utils.execute(*cmd, addl_env=env) LOG.debug("ISSUE_IP: %s, %s ", out, err) @@ -359,11 +359,11 @@ def release_ip(private_ip): private_ip) instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip) - cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) + cmd = (binpath('nova-dhcpbridge'), 'del', + instance_ref['mac_address'], + private_ip, 'fake') env = {'DNSMASQ_INTERFACE': network_ref['bridge'], 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) + (out, err) = utils.execute(*cmd, addl_env=env) LOG.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/utils.py b/nova/utils.py index 40a8d8d8c..c96b85294 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -125,15 +125,15 @@ def fetchfile(url, target): # c.perform() # c.close() # fp.close() - execute("curl","--fail",url,"-o",target) + execute("curl", "--fail", url, "-o", target) -def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - LOG.debug(_("Running cmd (subprocess): %s"), cmd) +def execute(*cmd, process_input=None, addl_env=None, check_exit_code=True): + LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd)) env = os.environ.copy() if addl_env: env.update(addl_env) - obj = subprocess.Popen(cmd, stdin=subprocess.PIPE, + obj = subprocess.Popen(*cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) result = None if process_input != None: @@ -148,7 +148,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): raise ProcessExecutionError(exit_code=obj.returncode, stdout=stdout, stderr=stderr, - cmd=cmd) + cmd=' '.join(cmd)) # NOTE(termie): this appears to be necessary to let the subprocess call # clean something up in between calls, without it two # execute calls in a row hangs the second one @@ -158,7 +158,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): - LOG.debug(_("Running cmd (SSH): %s"), cmd) + LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd)) if addl_env: raise exception.Error("Environment not supported over SSH") @@ -187,7 +187,7 @@ def ssh_execute(ssh, cmd, process_input=None, raise exception.ProcessExecutionError(exit_code=exit_status, stdout=stdout, stderr=stderr, - cmd=cmd) + cmd=' '.join(cmd)) return (stdout, stderr) @@ -254,7 +254,7 @@ def last_octet(address): def get_my_linklocal(interface): try: - if_str = execute("ip","-f","inet6","-o","addr","show", interface) + if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface) condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link" links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 2bded07a4..203517275 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -49,10 +49,10 @@ def extend(image, size): file_size = os.path.getsize(image) if file_size >= size: return - utils.execute('truncate -s %s %s' % 
(size, image)) + utils.execute('truncate', '-s', size, image) # NOTE(vish): attempts to resize filesystem - utils.execute('e2fsck -fp %s' % image, check_exit_code=False) - utils.execute('resize2fs %s' % image, check_exit_code=False) + utils.execute('e2fsck', '-fp', mage, check_exit_code=False) + utils.execute('resize2fs', image, check_exit_code=False) def inject_data(image, key=None, net=None, partition=None, nbd=False): @@ -68,7 +68,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): try: if not partition is None: # create partition - out, err = utils.execute('sudo kpartx -a %s' % device) + out, err = utils.execute('sudo', 'kpartx', '-a', device) if err: raise exception.Error(_('Failed to load partition: %s') % err) mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], @@ -84,13 +84,14 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): mapped_device) # Configure ext2fs so that it doesn't auto-check every N boots - out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) + out, err = utils.execute('sudo', 'tune2fs', + '-c', 0, '-i', 0, mapped_device) tmpdir = tempfile.mkdtemp() try: # mount loopback to dir out, err = utils.execute( - 'sudo mount %s %s' % (mapped_device, tmpdir)) + 'sudo', 'mount', mapped_device, tmpdir) if err: raise exception.Error(_('Failed to mount filesystem: %s') % err) @@ -103,13 +104,13 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): _inject_net_into_fs(net, tmpdir) finally: # unmount device - utils.execute('sudo umount %s' % mapped_device) + utils.execute('sudo', 'umount', mapped_device) finally: # remove temporary directory - utils.execute('rmdir %s' % tmpdir) + utils.execute('rmdir', tmpdir) if not partition is None: # remove partitions - utils.execute('sudo kpartx -d %s' % device) + utils.execute('sudo', 'kpartx', '-d', device) finally: _unlink_device(device, nbd) @@ -118,7 +119,7 @@ def _link_device(image, nbd): """Link image to device using loopback or nbd""" if nbd: device = _allocate_device() - utils.execute('sudo qemu-nbd -c %s %s' % (device, image)) + utils.execute('sudo', 'qemu-nbd', '-c', device, image) # NOTE(vish): this forks into another process, so give it a chance # to set up before continuuing for i in xrange(FLAGS.timeout_nbd): @@ -127,7 +128,7 @@ def _link_device(image, nbd): time.sleep(1) raise exception.Error(_('nbd device %s did not show up') % device) else: - out, err = utils.execute('sudo losetup --find --show %s' % image) + out, err = utils.execute('sudo', 'losetup', '--find', '--show', image) if err: raise exception.Error(_('Could not attach image to loopback: %s') % err) @@ -137,10 +138,10 @@ def _link_device(image, nbd): def _unlink_device(device, nbd): """Unlink image from device using loopback or nbd""" if nbd: - utils.execute('sudo qemu-nbd -d %s' % device) + utils.execute('sudo', 'qemu-nbd', '-d', device) _free_device(device) else: - utils.execute('sudo losetup --detach %s' % device) + utils.execute('sudo', 'losetup', '--detach', device) _DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)] @@ -170,11 +171,12 @@ def _inject_key_into_fs(key, fs): fs is the path to the base of the filesystem into which to inject the key. 
""" sshdir = os.path.join(fs, 'root', '.ssh') - utils.execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter - utils.execute('sudo chown root %s' % sshdir) - utils.execute('sudo chmod 700 %s' % sshdir) + utils.execute('sudo', 'mkdir', '-p', sshdir) # existing dir doesn't matter + utils.execute('sudo', 'chown', 'root', sshdir) + utils.execute('sudo', 'chmod', '700', sshdir) keyfile = os.path.join(sshdir, 'authorized_keys') - utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') + # TODO:EWINDISCH: not sure about the following /w execv patch + utils.execute('sudo', 'tee', '-a', keyfile, '\n' + key.strip() + '\n') def _inject_net_into_fs(net, fs): @@ -183,8 +185,8 @@ def _inject_net_into_fs(net, fs): net is the contents of /etc/network/interfaces. """ netdir = os.path.join(os.path.join(fs, 'etc'), 'network') - utils.execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter - utils.execute('sudo chown root:root %s' % netdir) - utils.execute('sudo chmod 755 %s' % netdir) + utils.execute('sudo', 'mkdir', '-p', netdir) # existing dir doesn't matter + utils.execute('sudo', 'chown', 'root:root', netdir) + utils.execute('sudo', 'chmod', 755, netdir) netfile = os.path.join(netdir, 'interfaces') - utils.execute('sudo tee %s' % netfile, net) + utils.execute('sudo', 'tee', netfile, net) diff --git a/nova/virt/images.py b/nova/virt/images.py index 7a6fef330..4b11d1667 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -94,8 +94,7 @@ def _fetch_s3_image(image, path, user, project): cmd += ['-H', '\'%s: %s\'' % (k, v)] cmd += ['-o', path] - cmd_out = ' '.join(cmd) - return utils.execute(cmd_out) + return utils.execute(*cmd) def _fetch_local_image(image, path, user, project): @@ -103,7 +102,7 @@ def _fetch_local_image(image, path, user, project): if sys.platform.startswith('win'): return shutil.copy(source, path) else: - return utils.execute('cp %s %s' % (source, path)) + return utils.execute('cp', source, path) def _image_path(path): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 4e0fd106f..464ec475c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -438,8 +438,10 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): LOG.info(_("cool, it's a device")) - out, err = utils.execute("sudo dd if=%s iflag=nonblock" % - virsh_output, check_exit_code=False) + out, err = utils.execute('sudo', 'dd', + "if=%s" % virsh_output, + 'iflag=nonblock', + check_exit_code=False) return out else: return '' @@ -461,11 +463,11 @@ class LibvirtConnection(object): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - utils.execute('sudo chown %d %s' % (os.getuid(), console_log)) + utils.execute('sudo', 'chown', s.getuid(), console_log) if FLAGS.libvirt_type == 'xen': # Xen is special - virsh_output = utils.execute("virsh ttyconsole %s" % + virsh_output = utils.execute('virsh', 'ttyconsole', instance['name']) data = self._flush_xen_console(virsh_output) fpath = self._append_to_file(data, console_log) @@ -482,7 +484,10 @@ class LibvirtConnection(object): port = random.randint(int(start_port), int(end_port)) # netcat will exit with 0 only if the port is in use, # so a nonzero return value implies it is unused - cmd = 'netcat 0.0.0.0 %s -w 1 Date: Tue, 8 Mar 2011 01:08:13 -0500 Subject: Fix todo comment --- nova/virt/libvirt_conn.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e1cd75306..a5256bbc2 100644 --- 
a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -486,8 +486,9 @@ class LibvirtConnection(object): # netcat will exit with 0 only if the port is in use, # so a nonzero return value implies it is unused - # TODO:ewindisch: subprocess lets us do this... - # but utils.execute abstracts it away from us + # TODO(ewindisch): broken /w execvp patch. + # subprocess lets us do this, but utils.execute + # abstracts it away from us cmd = 'netcat', '0.0.0.0', port, '-w', '1', ' Date: Tue, 8 Mar 2011 18:53:20 +0100 Subject: Added ability to remove networks on nova-manage command --- bin/nova-manage | 6 +++++- nova/db/api.py | 7 +++++++ nova/db/sqlalchemy/api.py | 7 +++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 94b0d5946..8ddfea5c2 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -548,7 +548,11 @@ class NetworkCommands(object): def delete(self, fixed_range): """Deletes a network""" network = db.network_get_by_cidr(context.get_admin_context(), fixed_range) - + if network.project_id is not None: + raise ValueError(_('Network must be disassociated from project %s' + ' before delete' %network.project_id)) + db.network_delete_safe(context.get_admin_context(), network.id) + class ServiceCommands(object): """Enable and disable running services""" diff --git a/nova/db/api.py b/nova/db/api.py index 04f5fd72f..7ad99c1f4 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -488,6 +488,13 @@ def network_create_safe(context, values): """ return IMPL.network_create_safe(context, values) +def network_delete_safe(context, network_id): + """Delete network with key network_id + + This method assumes that the network is not associated with any project + """ + return IMPL.network_delete_safe(context, network_id) + def network_create_fixed_ips(context, network_id, num_vpn_clients): """Create the ips for the network, reserving sepecified ips.""" diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index c8f42425d..90730d325 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1042,6 +1042,13 @@ def network_create_safe(context, values): except IntegrityError: return None +@require_admin_context +def network_delete_safe(context, network_id): + session = get_session() + with session.begin(): + network_ref = network_get(context, network_id=network_id, session=session) + session.delete(network_ref) + @require_admin_context def network_disassociate(context, network_id): -- cgit From 1caceddf431a1ad1ef22235c2206bccf39fde5c5 Mon Sep 17 00:00:00 2001 From: Cerberus Date: Tue, 8 Mar 2011 14:24:01 -0600 Subject: Nits --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7fe1f6ff0..86dbf251b 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -90,7 +90,7 @@ class VMOps(object): vm = VMHelper.lookup(self._session, instance_name) if vm is not None: raise exception.Duplicate(_('Attempted to create' - ' non-unique name %s') % instance_name) + ' non-unique name %s') % instance_name) #ensure enough free memory is available if not VMHelper.ensure_free_mem(self._session, instance): @@ -104,7 +104,7 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - vdi_ref = kernel = ramdisk = pv_kernel = None + kernel = ramdisk = pv_kernel = None # Are we building from a pre-existing disk? 
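A few hunks back, nova-manage network delete gained a guard so that a network still associated with a project cannot be removed. The flow is easy to misread in the flattened diff, so here it is as one small function; db stands in for nova.db.api, context for an admin request context, and the function name is illustrative:

    def delete_network(db, context, fixed_range):
        network = db.network_get_by_cidr(context, fixed_range)
        if network.project_id is not None:
            raise ValueError('Network must be disassociated from project %s'
                             ' before delete' % network.project_id)
        # network_delete_safe assumes no project association remains.
        db.network_delete_safe(context, network.id)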
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) -- cgit From dd2f0019297d01fe5d6b3dae4efc72946191be75 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 8 Mar 2011 22:14:25 +0000 Subject: Use disk_format and container_format instead of image type --- nova/api/openstack/servers.py | 2 +- nova/virt/xenapi/vm_utils.py | 18 ++++++++++-------- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 14 +++++++++++--- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index c2bf42b72..85999764f 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -450,7 +450,7 @@ class Controller(wsgi.Controller): _("Cannot build from image %(image_id)s, status not active") % locals()) - if image['type'] != 'machine': + if image['disk_format'] != 'ami': return None, None try: diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 80b7540d4..ce081a2d6 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -467,19 +467,21 @@ class VMHelper(HelperBase): "%(image_id)s, instance %(instance_id)s") % locals()) def determine_from_glance(): - glance_type2nova_type = {'machine': ImageType.DISK, - 'raw': ImageType.DISK_RAW, - 'vhd': ImageType.DISK_VHD, - 'kernel': ImageType.KERNEL_RAMDISK, - 'ramdisk': ImageType.KERNEL_RAMDISK} + glance_disk_format2nova_type = { + 'ami': ImageType.DISK, + 'aki': ImageType.KERNEL_RAMDISK, + 'ari': ImageType.KERNEL_RAMDISK, + 'raw': ImageType.DISK_RAW, + 'vhd': ImageType.DISK_VHD} client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta = client.get_image_meta(instance.image_id) - type_ = meta['type'] + disk_format = meta['disk_format'] try: - return glance_type2nova_type[type_] + return glance_disk_format2nova_type[disk_format] except KeyError: raise exception.NotFound( - _("Unrecognized image type '%(type_)s'") % locals()) + _("Unrecognized disk_format '%(disk_format)s'") + % locals()) def determine_from_instance(): if instance.kernel_id: diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index aa12d432a..201b99fda 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -201,13 +201,21 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): # to request conn.putrequest('PUT', '/images/%s' % image_id) - # TODO(sirp): make `store` configurable + # NOTE(sirp): There is some confusion around OVF. Here's a summary of + # where we currently stand: + # 1. OVF as a container format is misnamed. We really should be using + # OVA since that is the name for the container format; OVF is the + # standard applied to the manifest file contained within. + # 2. We're currently uploading a vanilla tarball. In order to be OVF/OVA + # compliant, we'll need to embed a minimal OVF manifest as the first + # file. 
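The heart of the disk_format/container_format commit is the translation table a few hunks up: Glance now describes images as ami/aki/ari/raw/vhd plus a container format, and the XenAPI layer maps the disk format onto its internal ImageType rather than the old single 'type' field. A compact sketch of that translation, with an illustrative ImageType enum standing in for nova.virt.xenapi.vm_utils.ImageType:

    class ImageType(object):
        KERNEL_RAMDISK, DISK, DISK_RAW, DISK_VHD = range(4)

    GLANCE_DISK_FORMAT_TO_TYPE = {
        'ami': ImageType.DISK,
        'aki': ImageType.KERNEL_RAMDISK,
        'ari': ImageType.KERNEL_RAMDISK,
        'raw': ImageType.DISK_RAW,
        'vhd': ImageType.DISK_VHD,
    }

    def nova_image_type(image_meta):
        """Map a Glance image-meta dict onto the ImageType constants."""
        disk_format = image_meta['disk_format']
        try:
            return GLANCE_DISK_FORMAT_TO_TYPE[disk_format]
        except KeyError:
            raise LookupError("Unrecognized disk_format '%s'" % disk_format)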
headers = { 'content-type': 'application/octet-stream', 'transfer-encoding': 'chunked', - 'x-image-meta-is_public': 'True', + 'x-image-meta-is-public': 'True', 'x-image-meta-status': 'queued', - 'x-image-meta-type': 'vhd'} + 'x-image-meta-disk-format': 'vhd', + 'x-image-meta-container-format': 'ovf'} for header, value in headers.iteritems(): conn.putheader(header, value) conn.endheaders() -- cgit From 7a6833c883a04fd7920bff7367c9e28a35858d8d Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Tue, 8 Mar 2011 23:17:50 +0000 Subject: Accidentally left some bad data around --- nova/tests/test_xenapi.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 919a38c06..916555af3 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -376,6 +376,11 @@ class XenAPIMigrateInstance(test.TestCase): stubs.stub_out_migration_methods(self.stubs) glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) + def tearDown(self): + super(XenAPIMigrateInstance, self).tearDown() + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + self.stubs.UnsetAll() def test_migrate_disk_and_power_off(self): instance = db.instance_create(self.values) -- cgit From 698398fdc2a05a0930591d3f3d386ad24a322359 Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Tue, 8 Mar 2011 23:24:19 +0000 Subject: Pep8 fixes --- nova/tests/test_xenapi.py | 2 ++ nova/tests/xenapi/stubs.py | 1 + nova/virt/xenapi/vmops.py | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 916555af3..c26dc8639 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -376,6 +376,7 @@ class XenAPIMigrateInstance(test.TestCase): stubs.stub_out_migration_methods(self.stubs) glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) + def tearDown(self): super(XenAPIMigrateInstance, self).tearDown() self.manager.delete_project(self.project) @@ -394,6 +395,7 @@ class XenAPIMigrateInstance(test.TestCase): conn = xenapi_conn.get_connection(False) conn.finish_resize(instance, dict(base_copy='hurr', cow='durr')) + class XenAPIDetermineDiskImageTestCase(test.TestCase): """ Unit tests for code that detects the ImageType diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index d8e358611..70d46a1fb 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -237,6 +237,7 @@ class FakeSessionForMigrationTests(fake.SessionBase): vm['is_a_template'] = False vm['is_control_domain'] = False + def stub_out_migration_methods(stubs): def fake_get_snapshot(self, instance): return 'foo', 'bar' diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 43c23806e..562ecd4d5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -83,7 +83,7 @@ class VMOps(object): def spawn(self, instance): vdi_uuid = self.create_disk(instance) self._spawn_with_disk(instance, vdi_uuid=vdi_uuid) - + def _spawn_with_disk(self, instance, vdi_uuid): """Create VM instance""" instance_name = instance.name -- cgit From 6207abe3068964c586d06bb0e3740b8bad922dca Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 8 Mar 2011 23:26:33 +0000 Subject: Fixing tests --- nova/tests/glance/stubs.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 3ff8d7ce5..5872552ec 100644 --- a/nova/tests/glance/stubs.py 
+++ b/nova/tests/glance/stubs.py @@ -35,23 +35,28 @@ class FakeGlance(object): IMAGE_FIXTURES = { IMAGE_MACHINE: { 'image_meta': {'name': 'fakemachine', 'size': 0, - 'type': 'machine'}, + 'disk_format': 'ami', + 'container_format': 'ami'}, 'image_data': StringIO.StringIO('')}, IMAGE_KERNEL: { 'image_meta': {'name': 'fakekernel', 'size': 0, - 'type': 'kernel'}, + 'disk_format': 'aki', + 'container_format': 'aki'}, 'image_data': StringIO.StringIO('')}, IMAGE_RAMDISK: { 'image_meta': {'name': 'fakeramdisk', 'size': 0, - 'type': 'ramdisk'}, + 'disk_format': 'ari', + 'container_format': 'ari'}, 'image_data': StringIO.StringIO('')}, IMAGE_RAW: { 'image_meta': {'name': 'fakeraw', 'size': 0, - 'type': 'raw'}, + 'disk_format': 'raw', + 'container_format': 'bare'}, 'image_data': StringIO.StringIO('')}, IMAGE_VHD: { 'image_meta': {'name': 'fakevhd', 'size': 0, - 'type': 'vhd'}, + 'disk_format': 'vhd', + 'container_format': 'ovf'}, 'image_data': StringIO.StringIO('')}} def __init__(self, host, port=None, use_ssl=False): -- cgit From a4830f83afd78cdb96dc3e474eb4efc167de7737 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 8 Mar 2011 16:45:20 -0800 Subject: Sorted imports correctly --- bin/nova-api | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-api b/bin/nova-api index 85ca4eefd..06bb855cb 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -34,9 +34,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) -from nova import service from nova import flags from nova import log as logging +from nova import service from nova import utils from nova import version from nova import wsgi -- cgit From e8c8fd3f232371625f0924410c4c09c32339b113 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 8 Mar 2011 16:47:43 -0800 Subject: Renamed FLAG.paste_config -> FLAG.api_paste_config --- nova/service.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/service.py b/nova/service.py index 5a8d58695..460f36f7a 100644 --- a/nova/service.py +++ b/nova/service.py @@ -56,7 +56,7 @@ flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') flags.DEFINE_string('osapi_listen', "0.0.0.0", 'IP address for OpenStack API to listen') flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') -flags.DEFINE_string('paste_config', "api-paste.ini", +flags.DEFINE_string('api_paste_config', "api-paste.ini", 'File name for the paste.deploy config for nova-api') @@ -240,10 +240,10 @@ class ApiService(WsgiService): @classmethod def create(cls, conf=None): if not conf: - conf = wsgi.paste_config_file(FLAGS.paste_config) + conf = wsgi.paste_config_file(FLAGS.api_paste_config) if not conf: message = (_("No paste configuration found for: %s"), - FLAGS.paste_config) + FLAGS.api_paste_config) raise exception.Error(message) api_endpoints = ['ec2', 'osapi'] service = cls(conf, api_endpoints) @@ -315,7 +315,7 @@ def _run_wsgi(paste_config_file, apis): getattr(FLAGS, "%s_listen" % api))) if len(apps) == 0: logging.error(_("No known API applications configured in %s."), - paste_config_file) + paste_config_file) return server = wsgi.Server() -- cgit From 59fa70102a06dce9f86b9b29825245bc54c01598 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 8 Mar 2011 16:51:05 -0800 Subject: Added documentation about needed flags --- nova/service.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/service.py b/nova/service.py index 460f36f7a..af20db01c 100644 
--- a/nova/service.py +++ b/nova/service.py @@ -221,7 +221,12 @@ class Service(object): class WsgiService(object): - """Base class for WSGI based services.""" + """Base class for WSGI based services. + + For each api you define, you must also define these flags: + :_listen: The address on which to listen + :_listen_port: The port on which to listen + """ def __init__(self, conf, apis): self.conf = conf -- cgit From a320b5df9f916adf8422ed312306c77570d392c2 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Wed, 9 Mar 2011 00:30:05 -0500 Subject: execvp: almost passes tests --- nova/api/ec2/cloud.py | 2 +- nova/network/linux_net.py | 21 +++++++++++---------- nova/tests/test_network.py | 2 +- nova/tests/test_virt.py | 11 ++++++----- nova/utils.py | 19 +++++++++++++------ nova/virt/libvirt_conn.py | 11 ++++++----- nova/virt/xenapi/vm_utils.py | 6 ++---- nova/volume/driver.py | 5 +++-- 8 files changed, 43 insertions(+), 34 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 0d22a3f46..b7d72d1c1 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -115,7 +115,7 @@ class CloudController(object): start = os.getcwd() os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead - utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh") + utils.runthis(_("Generating root CA: %s"), "sh", "genrootca.sh") os.chdir(start) def _get_mpi_data(self, context, project_id): diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index ad019a8c0..b66a1adb7 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -139,14 +139,14 @@ def init_host(): def bind_floating_ip(floating_ip, check_exit_code=True): """Bind ip to public interface""" _execute('sudo', 'ip', 'addr', 'add', floating_ip, - 'dev', FLAGS.public_interface), + 'dev', FLAGS.public_interface, check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): """Unbind a public ip from public interface""" _execute('sudo', 'ip', 'addr', 'del', floating_ip, - 'dev', FLAGS.public_interface)) + 'dev', FLAGS.public_interface) def ensure_vlan_forward(public_ip, port, private_ip): @@ -213,7 +213,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): _execute('sudo', 'brctl', 'addbr', bridge) _execute('sudo', 'brctl', 'setfd', bridge, 0) # _execute("sudo brctl setageing %s 10" % bridge) - _execute('sudo', 'brctl', 'stp', bridge', 'off') + _execute('sudo', 'brctl', 'stp', bridge, 'off') _execute('sudo', 'ip', 'link', 'set', bridge, up) if net_attrs: # NOTE(vish): The ip for dnsmasq has to be the first address on the @@ -223,7 +223,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): "%s/%s" % (net_attrs['gateway'], suffix), 'brd', - net-attrs['broadcast'], + net_attrs['broadcast'], 'dev', bridge, check_exit_code=False) @@ -257,7 +257,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): _execute('sudo', 'ip', 'addr', 'add', params, 'dev', bridge) if gateway: _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway) - out, err = _execute('sudo', 'brctl', 'addif, bridge, interface, + out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, check_exit_code=False) if (err and err != "device %s is already a member of a bridge; can't " @@ -265,11 +265,11 @@ def ensure_bridge(bridge, interface, net_attrs=None): raise exception.Error("Failed to add interface: %s" % err) if FLAGS.use_nova_chains: - (out, err) = _execute('sudo', 'iptables', '-N', 'nova_forward, + (out, err) = _execute('sudo', 'iptables', '-N', 'nova_forward', check_exit_code=False) if err != 
'iptables: Chain already exists.\n': # NOTE(vish): chain didn't exist link chain - _execute('sudo', 'iptables, '-D', 'FORWARD', '-j', 'nova_forward', + _execute('sudo', 'iptables', '-D', 'FORWARD', '-j', 'nova_forward', check_exit_code=False) _execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward') @@ -355,7 +355,7 @@ interface %s # if radvd is already running, then tell it to reload if pid: - out, _err = _execute('cat', "/proc/%d/cmdline' + out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: @@ -383,7 +383,7 @@ def _host_dhcp(fixed_ip_ref): def _execute(*cmd, **kwargs): """Wrapper around utils._execute for fake_network""" if FLAGS.fake_network: - LOG.debug("FAKE NET: %s", ' '.join(cmd)) + LOG.debug("FAKE NET: %s", " ".join(map(str, cmd))) return "fake", 0 else: return utils.execute(*cmd, **kwargs) @@ -396,7 +396,8 @@ def _device_exists(device): return not err -def _confirm_rule(chain, *cmd, append=False): +def _confirm_rule(chain, *cmd, **kwargs): + append=kwargs.get('append',False) """Delete and re-add iptables rule""" if FLAGS.use_nova_chains: chain = "nova_%s" % chain.lower() diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 6d2d8b771..19099ff4c 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -343,7 +343,7 @@ def lease_ip(private_ip): private_ip) instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip) - cmd = (binpath('nova-dhcpbridge'), 'add' + cmd = (binpath('nova-dhcpbridge'), 'add', instance_ref['mac_address'], private_ip, 'fake') env = {'DNSMASQ_INTERFACE': network_ref['bridge'], diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index f151ae911..7f1ad002e 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -315,15 +315,16 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) # self.fw.add_instance(instance_ref) - def fake_iptables_execute(cmd, process_input=None): - if cmd == 'sudo ip6tables-save -t filter': + def fake_iptables_execute(*cmd, **kwargs): + process_input=kwargs.get('process_input', None) + if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'): return '\n'.join(self.in6_rules), None - if cmd == 'sudo iptables-save -t filter': + if cmd == ('sudo', 'iptables-save', '-t', 'filter'): return '\n'.join(self.in_rules), None - if cmd == 'sudo iptables-restore': + if cmd == ('sudo', 'iptables-restore'): self.out_rules = process_input.split('\n') return '', '' - if cmd == 'sudo ip6tables-restore': + if cmd == ('sudo', 'ip6tables-restore'): self.out6_rules = process_input.split('\n') return '', '' self.fw.execute = fake_iptables_execute diff --git a/nova/utils.py b/nova/utils.py index c96b85294..9b51f8b40 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -128,13 +128,20 @@ def fetchfile(url, target): execute("curl", "--fail", url, "-o", target) -def execute(*cmd, process_input=None, addl_env=None, check_exit_code=True): +def execute(*cmd, **kwargs): + process_input=kwargs.get('process_input', None) + addl_env=kwargs.get('addl_env', None) + check_exit_code=kwargs.get('check_exit_code', True) + stdin=kwargs.get('stdin', subprocess.PIPE) + stdout=kwargs.get('stdout', subprocess.PIPE) + stderr=kwargs.get('stderr', subprocess.PIPE) + LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd)) env = os.environ.copy() if addl_env: env.update(addl_env) - obj = subprocess.Popen(*cmd, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) 
+ obj = subprocess.Popen(cmd, stdin=stdin, + stdout=stdout, stderr=stderr, env=env) result = None if process_input != None: result = obj.communicate(process_input) @@ -220,9 +227,9 @@ def debug(arg): return arg -def runthis(prompt, cmd, check_exit_code=True): - LOG.debug(_("Running %s"), (cmd)) - rv, err = execute(cmd, check_exit_code=check_exit_code) +def runthis(prompt, *cmd, **kwargs): + LOG.debug(_("Running %s"), (" ".join(cmd))) + rv, err = execute(*cmd, **kwargs) def generate_uid(topic, size=8): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index a5256bbc2..76f31f91a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -540,8 +540,8 @@ class LibvirtConnection(object): if not os.path.exists(base): fn(target=base, *args, **kwargs) if cow: - utils.execute('qemu-img', 'create', '-f', 'qcow2', "'-o'', - "cluster_size=2M,backing_file=%s" % base, + utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o', + 'cluster_size=2M,backing_file=%s' % base, target) else: utils.execute('cp', base, target) @@ -554,7 +554,7 @@ class LibvirtConnection(object): def _create_local(self, target, local_gb): """Create a blank image of specified size""" - utils.execute('truncate', target, '-s', "%dG" local_gb) + utils.execute('truncate', target, '-s', "%dG" % local_gb) # TODO(vish): should we format disk by default? def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None): @@ -565,7 +565,7 @@ class LibvirtConnection(object): fname + suffix) # ensure directories exist and are writable - utils.execute('mkdir', '-p', basepath(suffix='') + utils.execute('mkdir', '-p', basepath(suffix='')) LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') @@ -1245,7 +1245,8 @@ class IptablesFirewallDriver(FirewallDriver): self.apply_ruleset() def apply_ruleset(self): - current_filter, _ = self.execute('sudo iptables-save -t filter') + current_filter, _ = self.execute('sudo', 'iptables-save', + '-t', 'filter') current_lines = current_filter.split('\n') new_filter = self.modify_rules(current_lines, 4) self.execute('sudo', 'iptables-restore', diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 8fdb658fb..6ba13f980 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -915,10 +915,8 @@ def _write_partition(virtual_size, dev): LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d' ' to %(dest)s...') % locals()) - def execute(*cmd, process_input=None, check_exit_code=True): - return utils.execute(*cmd, - process_input=process_input, - check_exit_code=check_exit_code) + def execute(*cmd, **kwargs): + return utils.execute(*cmd, **kwargs) execute('parted', '--script', dest, 'mklabel', 'msdos') execute('parted', '--script', dest, 'mkpart', 'primary', diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 220c9ef9d..e9bdf162f 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -104,7 +104,8 @@ class VolumeDriver(object): def delete_volume(self, volume): """Deletes a logical volume.""" try: - self._try_execute('sudo', 'lvdisplay', '%s/%s" % + self._try_execute('sudo', 'lvdisplay', + '%s/%s' % (FLAGS.volume_group, volume['name'])) except Exception as e: @@ -550,7 +551,7 @@ class SheepdogDriver(VolumeDriver): else: sizestr = '%sG' % volume['size'] self._try_execute('qemu-img', 'create', - "sheepdog:%s" %s" % volume['name'], + "sheepdog:%s" % volume['name'], sizestr) def delete_volume(self, volume): -- cgit From 1d7358e70379607c9cce02307f4336efbd135a5d 
Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Wed, 9 Mar 2011 01:26:53 -0500 Subject: execvp: unit tests pass --- nova/crypto.py | 2 +- nova/utils.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/crypto.py b/nova/crypto.py index dd24723b8..20bb570a5 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -196,7 +196,7 @@ def generate_x509_cert(user_id, project_id, bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) csrfile = os.path.join(tmpdir, 'temp.csr') - utils.execute('openssl', 'genrsa', '-out', keyfile, bits) + utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits)) utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile, '-batch', '-subj', subject) private_key = open(keyfile).read() diff --git a/nova/utils.py b/nova/utils.py index 9b51f8b40..7ddf056ea 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -135,6 +135,7 @@ def execute(*cmd, **kwargs): stdin=kwargs.get('stdin', subprocess.PIPE) stdout=kwargs.get('stdout', subprocess.PIPE) stderr=kwargs.get('stderr', subprocess.PIPE) + cmd=map(str,cmd) LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd)) env = os.environ.copy() -- cgit From 77da93886be61230dea5a4a4c4de036a57e62550 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 9 Mar 2011 06:56:42 +0000 Subject: tests and semaphore fix for image caching --- nova/tests/test_virt.py | 66 +++++++++++++++++++++++++++++++++++++++++++++++ nova/virt/libvirt_conn.py | 14 +++++++--- 2 files changed, 76 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index f151ae911..ec7498d72 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -14,12 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. 
+import os + +import eventlet from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom from nova import context from nova import db from nova import flags +from nova import log as logging from nova import test from nova import utils from nova.api.ec2 import cloud @@ -30,6 +34,68 @@ FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') +def _concurrency(wait, done, target): + wait.wait() + done.send() + + +class CacheConcurrencyTestCase(test.TestCase): + def setUp(self): + super(CacheConcurrencyTestCase, self).setUp() + + def fake_exists(fname): + basedir = os.path.join(FLAGS.instances_path, '_base') + if fname == basedir: + return True + return False + + def fake_execute(*args, **kwargs): + pass + + self.stubs.Set(os.path, 'exists', fake_exists) + self.stubs.Set(utils, 'execute', fake_execute) + + def test_same_fname_concurrency(self): + """Ensures that the same fname cache runs at a sequentially""" + conn = libvirt_conn.get_connection(False) + wait1 = eventlet.event.Event() + done1 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname', False, wait1, done1) + wait2 = eventlet.event.Event() + done2 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname', False, wait2, done2) + wait2.send() + eventlet.sleep(0) + try: + self.assertFalse(done2.ready()) + finally: + wait1.send() + done1.wait() + eventlet.sleep(0) + self.assertTrue(done2.ready()) + + def test_different_fname_concurrency(self): + """Ensures that two different fname caches are concurrent""" + conn = libvirt_conn.get_connection(False) + wait1 = eventlet.event.Event() + done1 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname2', False, wait1, done1) + wait2 = eventlet.event.Event() + done2 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname1', False, wait2, done2) + wait2.send() + eventlet.sleep(0) + try: + self.assertTrue(done2.ready()) + finally: + wait1.send() + eventlet.sleep(0) + + class LibvirtConnTestCase(test.TestCase): def setUp(self): super(LibvirtConnTestCase, self).setUp() diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9f7315c17..1a1f146d4 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,9 +44,8 @@ import uuid from xml.dom import minidom -from eventlet import greenthread -from eventlet import event from eventlet import tpool +from eventlet import semaphore import IPy @@ -512,6 +511,8 @@ class LibvirtConnection(object): subprocess.Popen(cmd, shell=True) return {'token': token, 'host': host, 'port': port} + _image_semaphores = {} + def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs): """Wrapper for a method that creates an image that caches the image. 
@@ -531,8 +532,13 @@ class LibvirtConnection(object): if not os.path.exists(base_dir): os.mkdir(base_dir) base = os.path.join(base_dir, fname) - if not os.path.exists(base): - fn(target=base, *args, **kwargs) + + if fname not in self._image_semaphores: + self._image_semaphores[fname] = semaphore.Semaphore() + with self._image_semaphores[fname]: + if not os.path.exists(base): + fn(target=base, *args, **kwargs) + if cow: utils.execute('qemu-img create -f qcow2 -o ' 'cluster_size=2M,backing_file=%s %s' -- cgit From ddeab2da30bb2f74109854d982c6681e78e7a4ce Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 9 Mar 2011 07:35:58 +0000 Subject: make static method for testing without initializing libvirt --- nova/tests/test_virt.py | 4 ++-- nova/virt/libvirt_conn.py | 11 ++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index ec7498d72..113632a0c 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -57,7 +57,7 @@ class CacheConcurrencyTestCase(test.TestCase): def test_same_fname_concurrency(self): """Ensures that the same fname cache runs at a sequentially""" - conn = libvirt_conn.get_connection(False) + conn = libvirt_conn.LibvirtConnection wait1 = eventlet.event.Event() done1 = eventlet.event.Event() eventlet.spawn(conn._cache_image, _concurrency, @@ -78,7 +78,7 @@ class CacheConcurrencyTestCase(test.TestCase): def test_different_fname_concurrency(self): """Ensures that two different fname caches are concurrent""" - conn = libvirt_conn.get_connection(False) + conn = libvirt_conn.LibvirtConnection wait1 = eventlet.event.Event() done1 = eventlet.event.Event() eventlet.spawn(conn._cache_image, _concurrency, diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 1a1f146d4..ecef7950a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -511,9 +511,10 @@ class LibvirtConnection(object): subprocess.Popen(cmd, shell=True) return {'token': token, 'host': host, 'port': port} - _image_semaphores = {} + _image_sems = {} - def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs): + @staticmethod + def _cache_image(fn, target, fname, cow=False, *args, **kwargs): """Wrapper for a method that creates an image that caches the image. This wrapper will save the image into a common store and create a @@ -533,9 +534,9 @@ class LibvirtConnection(object): os.mkdir(base_dir) base = os.path.join(base_dir, fname) - if fname not in self._image_semaphores: - self._image_semaphores[fname] = semaphore.Semaphore() - with self._image_semaphores[fname]: + if fname not in LibvirtConnection._image_sems: + LibvirtConnection._image_sems[fname] = semaphore.Semaphore() + with LibvirtConnection._image_sems[fname]: if not os.path.exists(base): fn(target=base, *args, **kwargs) -- cgit From 7d31fe9ef316f49379818259a55a84deb5b850cd Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 9 Mar 2011 10:30:18 +0100 Subject: Stop assuming anything about the order in which the two processes are scheduled. 
--- nova/tests/test_misc.py | 47 +++++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index 9f572b58e..a658e4978 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -71,30 +71,33 @@ class LockTestCase(test.TestCase): "got mangled") def test_synchronized(self): - rpipe, wpipe = os.pipe() + rpipe1, wpipe1 = os.pipe() + rpipe2, wpipe2 = os.pipe() + + @synchronized('testlock') + def f(rpipe, wpipe): + try: + os.write(wpipe, "foo") + except OSError, e: + self.assertEquals(e.errno, errno.EPIPE) + return + + rfds, _, __ = select.select([rpipe], [], [], 1) + self.assertEquals(len(rfds), 0, "The other process, which was" + " supposed to be locked, " + "wrote on its end of the " + "pipe") + os.close(rpipe) + pid = os.fork() if pid > 0: - os.close(wpipe) - - @synchronized('testlock') - def f(): - rfds, _, __ = select.select([rpipe], [], [], 1) - self.assertEquals(len(rfds), 0, "The other process, which was" - " supposed to be locked, " - "wrote on its end of the " - "pipe") - os.close(rpipe) - - f() + os.close(wpipe1) + os.close(rpipe2) + + f(rpipe1, wpipe2) else: - os.close(rpipe) + os.close(rpipe1) + os.close(wpipe2) - @synchronized('testlock') - def g(): - try: - os.write(wpipe, "foo") - except OSError, e: - self.assertEquals(e.errno, errno.EPIPE) - return - g() + f(rpipe2, wpipe1) os._exit(0) -- cgit From 0f45b59ca6f9502a3ae6578e2fca5a7d9575ae5e Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 9 Mar 2011 10:37:21 -0500 Subject: Added 'adminPass' to the serialization_metadata. --- nova/api/openstack/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 9581b8477..bbedd7c63 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -96,7 +96,7 @@ class Controller(wsgi.Controller): 'application/xml': { "attributes": { "server": ["id", "imageId", "name", "flavorId", "hostId", - "status", "progress"]}}} + "status", "progress", "adminPass"]}}} def __init__(self): self.compute_api = compute.API() -- cgit From eadce208c55513ddbab550898e641b8ee55a67ec Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 9 Mar 2011 12:32:15 -0500 Subject: Fix spacing. 
--- nova/api/openstack/servers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index bbedd7c63..7222285e0 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -84,11 +84,13 @@ def _translate_detail_keys(inst): return dict(server=inst_dict) + def _translate_keys(inst): """ Coerces into dictionary format, excluding all model attributes save for id and name """ return dict(server=dict(id=inst['id'], name=inst['display_name'])) + class Controller(wsgi.Controller): """ The Server API controller for the OpenStack API """ -- cgit From e17876ec002f976572b6ac102dc113024669a45c Mon Sep 17 00:00:00 2001 From: Ricardo Carrillo Cruz Date: Wed, 9 Mar 2011 18:57:53 +0100 Subject: fixed lp715427 --- nova/network/manager.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index b36dd59cf..39da031eb 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -563,6 +563,16 @@ class VlanManager(NetworkManager): # NOTE(vish): This makes ports unique accross the cloud, a more # robust solution would be to make them unique per ip net['vpn_public_port'] = vpn_start + index + network_ref = None + try: + network_ref = db.network_get_by_cidr(context, cidr) + except exception.NotFound: + pass + + if network_ref is not None: + raise ValueError(_('Network with cidr %s already exists' % + cidr)) + network_ref = self.db.network_create_safe(context, net) if network_ref: self._create_fixed_ips(context, network_ref['id']) -- cgit From 48c8b911899db4db36dfc2e0ddaf3410c3179071 Mon Sep 17 00:00:00 2001 From: Ricardo Carrillo Cruz Date: Wed, 9 Mar 2011 19:03:58 +0100 Subject: fixed lp715427 --- bin/nova-manage | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 8ddfea5c2..45437d7e7 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -547,10 +547,11 @@ class NetworkCommands(object): def delete(self, fixed_range): """Deletes a network""" - network = db.network_get_by_cidr(context.get_admin_context(), fixed_range) + network = db.network_get_by_cidr(context.get_admin_context(), \ + fixed_range) if network.project_id is not None: - raise ValueError(_('Network must be disassociated from project %s' - ' before delete' %network.project_id)) + raise ValueError(_('Network must be disassociated from project %s' + ' before delete' % network.project_id)) db.network_delete_safe(context.get_admin_context(), network.id) class ServiceCommands(object): -- cgit From e44f085ed464a3397e3bf89a3e5355e538c71a65 Mon Sep 17 00:00:00 2001 From: Ricardo Carrillo Cruz Date: Wed, 9 Mar 2011 19:16:26 +0100 Subject: Fixed pep8 issues --- bin/nova-manage | 5 +++-- nova/db/api.py | 7 +++++-- nova/db/sqlalchemy/api.py | 7 +++++-- nova/network/manager.py | 4 ++-- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 45437d7e7..7dfc3c045 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -552,8 +552,9 @@ class NetworkCommands(object): if network.project_id is not None: raise ValueError(_('Network must be disassociated from project %s' ' before delete' % network.project_id)) - db.network_delete_safe(context.get_admin_context(), network.id) - + db.network_delete_safe(context.get_admin_context(), network.id) + + class ServiceCommands(object): """Enable and disable running services""" diff --git a/nova/db/api.py b/nova/db/api.py index 7ad99c1f4..5c34a02e4 100644 --- a/nova/db/api.py +++ 
b/nova/db/api.py @@ -459,6 +459,7 @@ def network_associate(context, project_id): """Associate a free network to a project.""" return IMPL.network_associate(context, project_id) + def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) @@ -488,9 +489,9 @@ def network_create_safe(context, values): """ return IMPL.network_create_safe(context, values) + def network_delete_safe(context, network_id): - """Delete network with key network_id - + """Delete network with key network_id. This method assumes that the network is not associated with any project """ return IMPL.network_delete_safe(context, network_id) @@ -531,10 +532,12 @@ def network_get_by_bridge(context, bridge): """Get a network by bridge or raise if it does not exist.""" return IMPL.network_get_by_bridge(context, bridge) + def network_get_by_cidr(context, cidr): """Get a network by cidr or raise if it does not exist""" return IMPL.network_get_by_cidr(context, cidr) + def network_get_by_instance(context, instance_id): """Get a network by instance id or raise if it does not exist.""" return IMPL.network_get_by_instance(context, instance_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 90730d325..3a1162a17 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1042,12 +1042,14 @@ def network_create_safe(context, values): except IntegrityError: return None + @require_admin_context def network_delete_safe(context, network_id): session = get_session() with session.begin(): - network_ref = network_get(context, network_id=network_id, session=session) - session.delete(network_ref) + network_ref = network_get(context, network_id=network_id, \ + session=session) + session.delete(network_ref) @require_admin_context @@ -1134,6 +1136,7 @@ def network_get_by_cidr(context, cidr): cidr) return result + @require_admin_context def network_get_by_instance(_context, instance_id): session = get_session() diff --git a/nova/network/manager.py b/nova/network/manager.py index 39da031eb..3dfc48934 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -568,11 +568,11 @@ class VlanManager(NetworkManager): network_ref = db.network_get_by_cidr(context, cidr) except exception.NotFound: pass - + if network_ref is not None: raise ValueError(_('Network with cidr %s already exists' % cidr)) - + network_ref = self.db.network_create_safe(context, net) if network_ref: self._create_fixed_ips(context, network_ref['id']) -- cgit From 23369a63f4b74fb64bf57554a3fd8b15e3e2b49c Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Wed, 9 Mar 2011 14:31:23 -0500 Subject: Fixes uses of process_input --- nova/utils.py | 4 ++-- nova/virt/disk.py | 4 ++-- nova/virt/libvirt_conn.py | 11 ++++------- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index 7ddf056ea..0937522ec 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -131,7 +131,7 @@ def fetchfile(url, target): def execute(*cmd, **kwargs): process_input=kwargs.get('process_input', None) addl_env=kwargs.get('addl_env', None) - check_exit_code=kwargs.get('check_exit_code', True) + check_exit_code=kwargs.get('check_exit_code', 0) stdin=kwargs.get('stdin', subprocess.PIPE) stdout=kwargs.get('stdout', subprocess.PIPE) stderr=kwargs.get('stderr', subprocess.PIPE) @@ -151,7 +151,7 @@ def execute(*cmd, **kwargs): obj.stdin.close() if obj.returncode: LOG.debug(_("Result was %s") % obj.returncode) - if check_exit_code and obj.returncode != 0: + if check_exit_code is not None and obj.returncode 
!= check_exit_code: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, stdout=stdout, diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 203517275..a54cda003 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -175,8 +175,8 @@ def _inject_key_into_fs(key, fs): utils.execute('sudo', 'chown', 'root', sshdir) utils.execute('sudo', 'chmod', '700', sshdir) keyfile = os.path.join(sshdir, 'authorized_keys') - # TODO:EWINDISCH: not sure about the following /w execv patch - utils.execute('sudo', 'tee', '-a', keyfile, '\n' + key.strip() + '\n') + utils.execute('sudo', 'tee', '-a', keyfile, + process_input='\n' + key.strip() + '\n') def _inject_net_into_fs(net, fs): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 76f31f91a..6b555ecbb 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -485,13 +485,10 @@ class LibvirtConnection(object): port = random.randint(int(start_port), int(end_port)) # netcat will exit with 0 only if the port is in use, # so a nonzero return value implies it is unused - - # TODO(ewindisch): broken /w execvp patch. - # subprocess lets us do this, but utils.execute - # abstracts it away from us - cmd = 'netcat', '0.0.0.0', port, '-w', '1', ' Date: Wed, 9 Mar 2011 14:53:44 -0500 Subject: Add password parameter to the set_admin_password call in the compute api. Updated servers password to use this parameter. --- nova/api/openstack/servers.py | 3 ++- nova/compute/api.py | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 7222285e0..41166f810 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -183,7 +183,8 @@ class Controller(wsgi.Controller): password = "%s%s" % (server['server']['name'][:4], utils.generate_password(12)) server['server']['adminPass'] = password - self.compute_api.set_admin_password(context, server['server']['id']) + self.compute_api.set_admin_password(context, server['server']['id'], + password) return server def update(self, req, id): diff --git a/nova/compute/api.py b/nova/compute/api.py index 33d25fc4b..a0bb2cf04 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -498,9 +498,10 @@ class API(base.Base): """Unrescue the given instance.""" self._cast_compute_message('unrescue_instance', context, instance_id) - def set_admin_password(self, context, instance_id): + def set_admin_password(self, context, instance_id, password=None): """Set the root/admin password for the given instance.""" - self._cast_compute_message('set_admin_password', context, instance_id) + self._cast_compute_message('set_admin_password', context, instance_id, + password) def inject_file(self, context, instance_id): """Write a file to the given instance.""" -- cgit From 3f723bcf54b4d779c66373dc8f69f43923dd586a Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 9 Mar 2011 15:08:11 -0500 Subject: renaming wsgi.Request.best_match to best_match_content_type; correcting calls to that function in code from trunk --- nova/api/direct.py | 2 +- nova/api/openstack/__init__.py | 2 +- nova/api/openstack/faults.py | 2 +- nova/api/openstack/servers.py | 2 +- nova/tests/api/openstack/common.py | 1 + nova/tests/api/test_wsgi.py | 18 +++++++++--------- nova/wsgi.py | 4 ++-- 7 files changed, 16 insertions(+), 15 deletions(-) diff --git a/nova/api/direct.py b/nova/api/direct.py index 1d699f947..dfca250e0 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -206,7 +206,7 @@ class 
ServiceWrapper(wsgi.Controller): params = dict([(str(k), v) for (k, v) in params.iteritems()]) result = method(context, **params) if type(result) is dict or type(result) is list: - return self._serialize(result, req.best_match()) + return self._serialize(result, req.best_match_content_type()) else: return result diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 6e1a2a06c..197fcc619 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -125,5 +125,5 @@ class Versions(wsgi.Application): "application/xml": { "attributes": dict(version=["status", "id"])}} - content_type = req.best_match() + content_type = req.best_match_content_type() return wsgi.Serializer(metadata).serialize(response, content_type) diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 075fdb997..2fd733299 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -58,6 +58,6 @@ class Fault(webob.exc.HTTPException): # 'code' is an attribute on the fault tag itself metadata = {'application/xml': {'attributes': {fault_name: 'code'}}} serializer = wsgi.Serializer(metadata) - content_type = req.best_match() + content_type = req.best_match_content_type() self.wrapped_exc.body = serializer.serialize(fault_data, content_type) return self.wrapped_exc diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8dd078a31..25c667532 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -217,7 +217,7 @@ class Controller(wsgi.Controller): 'rebuild': self._action_rebuild, } - input_dict = self._deserialize(req.body, req) + input_dict = self._deserialize(req.body, req.get_content_type()) for key in actions.keys(): if key in input_dict: return actions[key](input_dict, req, id) diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py index 3f9c7d3cf..74bb8729a 100644 --- a/nova/tests/api/openstack/common.py +++ b/nova/tests/api/openstack/common.py @@ -28,6 +28,7 @@ def webob_factory(url): def web_request(url, method=None, body=None): req = webob.Request.blank("%s%s" % (base_url, url)) if method: + req.content_type = "application/json" req.method = method if body: req.body = json.dumps(body) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 7c0135656..b1a849cf9 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -139,48 +139,48 @@ class RequestTest(test.TestCase): def test_content_type_from_accept_xml(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml" - result = request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/xml") request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/json" - result = request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/json") request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" - result = request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/json") request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = \ "application/json; q=0.3, application/xml; q=0.9" - result = request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/xml") def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.xml') - result = 
request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/xml") request = wsgi.Request.blank('/tests/123.json') - result = request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/json") request = wsgi.Request.blank('/tests/123.invalid') - result = request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/json") def test_content_type_accept_and_query_extension(self): request = wsgi.Request.blank('/tests/123.xml') request.headers["Accept"] = "application/json" - result = request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/xml") def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" - result = request.best_match() + result = request.best_match_content_type() self.assertEqual(result, "application/json") diff --git a/nova/wsgi.py b/nova/wsgi.py index c3e08522d..2d18da8fb 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -85,7 +85,7 @@ class Server(object): class Request(webob.Request): - def best_match(self): + def best_match_content_type(self): """ Determine the most acceptable content-type based on the query extension then the Accept header @@ -354,7 +354,7 @@ class Controller(object): result = method(**arg_dict) if type(result) is dict: - content_type = req.best_match() + content_type = req.best_match_content_type() body = self._serialize(result, content_type) response = webob.Response() -- cgit From fc9840bae6200c8f89fb8a3ba0ab45663c872b3c Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Wed, 9 Mar 2011 15:33:20 -0500 Subject: execvp passes pep8 --- nova/console/xvp.py | 6 +++--- nova/crypto.py | 3 ++- nova/network/linux_net.py | 19 ++++++++++++------- nova/tests/test_virt.py | 2 +- nova/utils.py | 19 ++++++++++--------- nova/volume/driver.py | 4 ++-- 6 files changed, 30 insertions(+), 23 deletions(-) diff --git a/nova/console/xvp.py b/nova/console/xvp.py index 271dffa54..68d8c8565 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -134,9 +134,9 @@ class XVPConsoleProxy(object): logging.debug(_("Starting xvp")) try: utils.execute('xvp', - '-p',FLAGS.console_xvp_pid, - '-c',FLAGS.console_xvp_conf, - '-l',FLAGS.console_xvp_log) + '-p', FLAGS.console_xvp_pid, + '-c', FLAGS.console_xvp_conf, + '-l', FLAGS.console_xvp_log) except exception.ProcessExecutionError, err: logging.error(_("Error starting xvp: %s") % err) diff --git a/nova/crypto.py b/nova/crypto.py index 20bb570a5..717ea0041 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -120,7 +120,8 @@ def generate_key_pair(bits=1024): # bio = M2Crypto.BIO.MemoryBuffer() # key.save_pub_key_bio(bio) # public_key = bio.read() - # public_key, err = execute('ssh-keygen', '-y', '-f', '/dev/stdin', private_key) + # public_key, err = execute('ssh-keygen', '-y', '-f', + # '/dev/stdin', private_key) return (private_key, public_key, fingerprint) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index b66a1adb7..228a4d9ea 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -68,7 +68,8 @@ def metadata_forward(): _confirm_rule("PREROUTING", '-t', 'nat', '-s', '0.0.0.0/0', '-d', '169.254.169.254/32', '-p', 'tcp', '-m', 'tcp', '--dport', '80', '-j', 'DNAT', - '--to-destination', '%s:%s' % (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) + '--to-destination', + '%s:%s' % (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) def init_host(): @@ -86,7 
+87,8 @@ def init_host(): _execute('sudo', 'iptables', '-D', 'FORWARD', '-j', 'nova_forward', check_exit_code=False) _execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward') - _execute('sudo', 'iptables', '-N', 'nova_output', check_exit_code=False) + _execute('sudo', 'iptables', '-N', 'nova_output', + check_exit_code=False) _execute('sudo', 'iptables', '-D', 'OUTPUT', '-j', 'nova_output', check_exit_code=False) _execute('sudo', 'iptables', '-A', 'OUTPUT', '-j', 'nova_output') @@ -220,7 +222,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): # bridge for it to respond to reqests properly suffix = net_attrs['cidr'].rpartition('/')[2] out, err = _execute('sudo', 'ip', 'addr', 'add', - "%s/%s" % + "%s/%s" % (net_attrs['gateway'], suffix), 'brd', net_attrs['broadcast'], @@ -237,7 +239,8 @@ def ensure_bridge(bridge, interface, net_attrs=None): # bridge, then the bridge has to be in promiscuous # to forward packets properly. if(FLAGS.public_interface == bridge): - _execute('sudo', 'ip', 'link', 'set', 'dev', bridge, 'promisc', 'on') + _execute('sudo', 'ip', 'link', 'set', + 'dev', bridge, 'promisc', 'on') if interface: # NOTE(vish): This will break if there is already an ip on the # interface, so we move any ips to the bridge @@ -253,8 +256,10 @@ def ensure_bridge(bridge, interface, net_attrs=None): fields = line.split() if fields and fields[0] == "inet": params = ' '.join(fields[1:-1]) - _execute('sudo', 'ip', 'addr', 'del', params, 'dev', fields[-1]) - _execute('sudo', 'ip', 'addr', 'add', params, 'dev', bridge) + _execute('sudo', 'ip', 'addr', + 'del', params, 'dev', fields[-1]) + _execute('sudo', 'ip', 'addr', + 'add', params, 'dev', bridge) if gateway: _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway) out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, @@ -397,7 +402,7 @@ def _device_exists(device): def _confirm_rule(chain, *cmd, **kwargs): - append=kwargs.get('append',False) + append = kwargs.get('append', False) """Delete and re-add iptables rule""" if FLAGS.use_nova_chains: chain = "nova_%s" % chain.lower() diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 7f1ad002e..dfa607f14 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -316,7 +316,7 @@ class IptablesFirewallTestCase(test.TestCase): # self.fw.add_instance(instance_ref) def fake_iptables_execute(*cmd, **kwargs): - process_input=kwargs.get('process_input', None) + process_input = kwargs.get('process_input', None) if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'): return '\n'.join(self.in6_rules), None if cmd == ('sudo', 'iptables-save', '-t', 'filter'): diff --git a/nova/utils.py b/nova/utils.py index 0937522ec..3a4ec3c6a 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -40,7 +40,7 @@ import netaddr from eventlet import event from eventlet import greenthread from eventlet.green import subprocess - +None from nova import exception from nova.exception import ProcessExecutionError from nova import log as logging @@ -129,13 +129,13 @@ def fetchfile(url, target): def execute(*cmd, **kwargs): - process_input=kwargs.get('process_input', None) - addl_env=kwargs.get('addl_env', None) - check_exit_code=kwargs.get('check_exit_code', 0) - stdin=kwargs.get('stdin', subprocess.PIPE) - stdout=kwargs.get('stdout', subprocess.PIPE) - stderr=kwargs.get('stderr', subprocess.PIPE) - cmd=map(str,cmd) + process_input = kwargs.get('process_input', None) + addl_env = kwargs.get('addl_env', None) + check_exit_code = kwargs.get('check_exit_code', 0) + stdin = 
kwargs.get('stdin', subprocess.PIPE) + stdout = kwargs.get('stdout', subprocess.PIPE) + stderr = kwargs.get('stderr', subprocess.PIPE) + cmd = map(str, cmd) LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd)) env = os.environ.copy() @@ -151,7 +151,8 @@ def execute(*cmd, **kwargs): obj.stdin.close() if obj.returncode: LOG.debug(_("Result was %s") % obj.returncode) - if check_exit_code is not None and obj.returncode != check_exit_code: + if type(check_exit_code) == types.IntType \ + and obj.returncode != check_exit_code: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, stdout=stdout, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index e9bdf162f..45cc800e7 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -112,7 +112,7 @@ class VolumeDriver(object): # If the volume isn't present, then don't attempt to delete return True - self._try_execute('sudo', 'lvremove', '-f',"%s/%s" % + self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % (FLAGS.volume_group, volume['name'])) @@ -256,7 +256,7 @@ class ISCSIDriver(VolumeDriver): self._sync_exec('sudo', 'ietadm', '--op', 'new', "--tid=%s" % iscsi_target, '--params', - "Name=%s" % iscsi-name, + "Name=%s" % iscsi_name, check_exit_code=False) self._sync_exec('sudo', 'ietadm', '--op', 'new', "--tid=%s" % iscsi_target, -- cgit From 3e61bf9963d7e98e8152d2eacfc4461d8cda309c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 9 Mar 2011 21:43:35 +0000 Subject: remove the semaphore when there is no one waiting on it --- nova/tests/test_virt.py | 3 ++- nova/virt/libvirt_conn.py | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 113632a0c..56a271365 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -23,7 +23,6 @@ from xml.dom.minidom import parseString as xml_to_dom from nova import context from nova import db from nova import flags -from nova import log as logging from nova import test from nova import utils from nova.api.ec2 import cloud @@ -70,11 +69,13 @@ class CacheConcurrencyTestCase(test.TestCase): eventlet.sleep(0) try: self.assertFalse(done2.ready()) + self.assertTrue('fname' in conn._image_sems) finally: wait1.send() done1.wait() eventlet.sleep(0) self.assertTrue(done2.ready()) + self.assertFalse('fname' in conn._image_sems) def test_different_fname_concurrency(self): """Ensures that two different fname caches are concurrent""" diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ecef7950a..69249ed57 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -539,6 +539,8 @@ class LibvirtConnection(object): with LibvirtConnection._image_sems[fname]: if not os.path.exists(base): fn(target=base, *args, **kwargs) + if not LibvirtConnection._image_sems[fname].locked(): + del LibvirtConnection._image_sems[fname] if cow: utils.execute('qemu-img create -f qcow2 -o ' -- cgit From e8554da80ac916f168461cb48078488700081c02 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Wed, 9 Mar 2011 16:44:48 -0500 Subject: execvp: cleanup. 
--- nova/crypto.py | 6 +++--- .../networking/etc/xensource/scripts/vif_rules.py | 22 +++++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nova/crypto.py b/nova/crypto.py index 717ea0041..2a8d4abca 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -105,7 +105,7 @@ def generate_key_pair(bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.join(tmpdir, 'temp') - utils.execute('ssh-keygen', '-q', '-b', '%d' % bits, '-N', '', + utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '', '-f', keyfile) (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', '%s.pub' % (keyfile)) @@ -147,9 +147,9 @@ def revoke_cert(project_id, file_name): os.chdir(ca_folder(project_id)) # NOTE(vish): potential race condition here utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke', - '%s' % file_name) + file_name) utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf', - '-out', '%s' % FLAGS.crl_file) + '-out', FLAGS.crl_file) os.chdir(start) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index 2c34f7b1d..d2b2d61e6 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -52,7 +52,7 @@ def main(dom_id, command, only_this_vif=None): apply_iptables_rules(command, params) -def execute(command, return_stdout=False): +def execute(*command, return_stdout=False): devnull = open(os.devnull, 'w') proc = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE, stderr=devnull) @@ -110,26 +110,26 @@ def apply_arptables_rules(command, params): def apply_ebtables_rules(command, params): ebtables = lambda *rule: execute("/sbin/ebtables", *rule) - ebtables('-D', 'FORWARD', '-p', '0806', '-o', '%(VIF)s' % params, - '--arp-ip-dst', '%(IP)s' % params, + ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'], + '--arp-ip-dst', params['IP'], '-j', 'ACCEPT') ebtables('-D', 'FORWARD', '-p', '0800', '-o', - '%(VIF)s' % params, '--ip-dst', '%(IP)s' % params, + params['VIF'], '--ip-dst', params['IP'], '-j', 'ACCEPT') if command == 'online': ebtables('-A', 'FORWARD', '-p', '0806', - '-o', '%(VIF)s' % params - '--arp-ip-dst', '%(IP)s' % params, + '-o', params['VIF'], + '--arp-ip-dst', params['IP'], '-j', 'ACCEPT') ebtables('-A', 'FORWARD', '-p', '0800', - '-o', '%(VIF)s' % params, - '--ip-dst', '%(IP)s' % params, + '-o', params['VIF'], + '--ip-dst', params['IP'], '-j', 'ACCEPT') - ebtables('-D', 'FORWARD', '-s', '!', '%(MAC)s' % params, - '-i', '%(VIF)s' % params, '-j', 'DROP') + ebtables('-D', 'FORWARD', '-s', '!', params['MAC'], + '-i', params['VIF'], '-j', 'DROP') if command == 'online': - ebtables('-I', 'FORWARD', '1', '-s', '!', '%(MAC)s' % params, + ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'], '-i', '%(VIF)s', '-j', 'DROP') -- cgit From 5f6a58c7c2a7359f67bc4e2c2eb6bb9cc0a9ff01 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Wed, 9 Mar 2011 17:22:54 -0500 Subject: execvp: fix docs --- doc/ext/nova_autodoc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/ext/nova_autodoc.py b/doc/ext/nova_autodoc.py index 5429bb656..3dd992d84 100644 --- a/doc/ext/nova_autodoc.py +++ b/doc/ext/nova_autodoc.py @@ -8,5 +8,6 @@ from nova import utils def setup(app): rootdir = os.path.abspath(app.srcdir + '/..') print "**Autodocumenting from %s" % rootdir - rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir) + os.chdir(rootdir) + rv 
= utils.execute('./generate_autodoc_index.sh') print rv[0] -- cgit
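
The change that ties the "execvp" commits above together is a single calling convention for nova.utils.execute(): commands are passed as separate argv elements instead of one shell string, options such as process_input and check_exit_code arrive through **kwargs, every argument is coerced to str before being handed to subprocess, and no shell is involved. The sketch below is a minimal, self-contained illustration of that convention, not the exact nova.utils implementation: it omits addl_env, eventlet/greenthread integration, and retry handling, and the exception message plus the example paths and key string in the trailing comments are illustrative only. It does keep the later refinement from these patches where check_exit_code defaults to 0 and is only enforced when it is an integer.

    import subprocess


    class ProcessExecutionError(Exception):
        """Raised when a command exits with an unexpected status."""


    def execute(*cmd, **kwargs):
        """Run cmd as an argv list (never through a shell); return (stdout, stderr)."""
        process_input = kwargs.get('process_input', None)
        check_exit_code = kwargs.get('check_exit_code', 0)
        # Coerce every element to str so callers may pass ints (ports, key sizes).
        argv = [str(arg) for arg in cmd]
        proc = subprocess.Popen(argv,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        stdout, stderr = proc.communicate(process_input)
        # Only enforce the exit status when an integer was requested.
        if isinstance(check_exit_code, int) and proc.returncode != check_exit_code:
            raise ProcessExecutionError('%r exited with code %d: %s'
                                        % (argv, proc.returncode, stderr))
        return stdout, stderr


    # Callers pass each argument separately and feed stdin via process_input, e.g.:
    #   out, err = execute('openssl', 'genrsa', '-out', '/tmp/temp.key', 1024)
    #   execute('tee', '-a', '/tmp/authorized_keys',
    #           process_input='ssh-rsa AAAA... user\n')

Passing argv elements separately is what lets the test stubs in these patches compare commands as tuples (for example, cmd == ('sudo', 'iptables-save', '-t', 'filter')) instead of matching shell strings, and it removes the quoting and injection pitfalls that the fixed-up calls in linux_net.py and volume/driver.py had been papering over.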